Original Code File
stringlengths 196
31.9k
| Original Ground Truth
stringlengths 78
32k
| Code
stringlengths 15
27.3k
| Unit Test
stringlengths 0
30.4k
|
---|---|---|---|
#include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
// Returns the name of the first function in the graph's library that contains
// a "CompressElement" op — that function is the tf.data compression map.
// Returns an Internal error when no such function exists.
absl::StatusOr<std::string> GetCompressionFunctionName(const GraphDef& graph) {
  for (const auto& fn : graph.library().function()) {
    for (const auto& fn_node : fn.node_def()) {
      if (fn_node.op() != "CompressElement") continue;
      return fn.signature().name();
    }
  }
  return errors::Internal("Compression function not found.");
}
// Returns the ParallelMapDatasetV2 node whose mapped function ("f" attr) is
// the compression function found by GetCompressionFunctionName. Returns an
// Internal error if either the function or the map node is missing.
absl::StatusOr<NodeDef> GetCompressionMapNode(const GraphDef& graph) {
  TF_ASSIGN_OR_RETURN(std::string compression_function_name,
                      GetCompressionFunctionName(graph));
  for (const auto& node : graph.node()) {
    if (node.op() != "ParallelMapDatasetV2") continue;
    const auto it = node.attr().find("f");
    if (it == node.attr().end()) continue;
    if (!it->second.has_func()) continue;
    if (it->second.func().name() == compression_function_name) {
      return node;
    }
  }
  return errors::Internal("Compression map node not found.");
}
}
// Rewrites `item.graph` to bypass the tf.data compression map: every consumer
// of the compression ParallelMapDatasetV2's output is rewired to read directly
// from the map's first input (its input dataset). `cluster` is unused.
// Increments `stats->num_changes` once per rewired consumer.
Status RemoveCompressionMap::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
// Fails with an Internal error when no compression map exists in the graph.
TF_ASSIGN_OR_RETURN(NodeDef compression_map_node,
GetCompressionMapNode(*output));
MutableGraphView graph(output);
for (const auto& compression_map_output :
graph.GetFanout(graph.GetOutputPort(compression_map_node.name(), 0))) {
// Drop all of the consumer's inputs and attach it straight to the map's
// input dataset, skipping the compression step entirely.
compression_map_output.node->clear_input();
compression_map_output.node->add_input(compression_map_node.input().Get(0));
++stats->num_changes;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(RemoveCompressionMap, "remove_compression_map");
}
} | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::HasSubstr;
// Builds Range -> compressing ParallelMap -> _Retval/Identity and verifies the
// optimizer rewires consumers of the map directly to the RangeDataset.
TEST(RemoveCompressionMap, Success) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("Const/_4",
"Const",
{},
{{"dtype", DT_INT64},
{"value", -1}}),
// Map node applying the compression function defined below.
graph_tests_utils::MakeParallelMapV2Node(
"ParallelMapDatasetV2/_5",
"RangeDataset/_3",
"Const/_4",
"__inference_Dataset_map_lambda_10",
"default"),
NDef("dataset",
"_Retval",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}})},
// The CompressElement op inside marks this as the compression function.
{FunctionDefHelper::Create(
"__inference_Dataset_map_lambda_10",
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"CompressElement"},
"CompressElement",
{"args_0"},
{{"input_types", DT_INT64}}},
{{"Identity"},
"Identity",
{"CompressElement:compressed:0"},
{{"T", DT_VARIANT}}},
},
{})});
RemoveCompressionMap optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
// The retval must now read from the map's input dataset, not the map.
int index = graph_utils::FindGraphNodeWithName("dataset", output);
EXPECT_EQ(output.node(index).input(0), "RangeDataset/_3");
}
// Without any compression function in the library, the optimizer must fail
// with an INTERNAL error rather than silently succeeding.
TEST(RemoveCompressionMap, FailureNoMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef({NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("dataset",
"_Retval",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}})});
RemoveCompressionMap optimizer;
GraphDef output;
ASSERT_THAT(optimizer.Optimize(nullptr, item, &output),
testing::StatusIs(error::INTERNAL,
HasSubstr("Compression function not found.")));
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
absl::StatusOr<std::string> GetCompressionFunctionName(const GraphDef& graph) {
for (const auto& function : graph.library().function()) {
for (const auto& node : function.node_def()) {
if (node.op() == "CompressElement") {
return function.signature().name();
}
}
}
return errors::Internal("Compression function not found.");
} | TEST(RemoveCompressionMap, Success) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("Const/_4",
"Const",
{},
{{"dtype", DT_INT64},
{"value", -1}}),
graph_tests_utils::MakeParallelMapV2Node(
"ParallelMapDatasetV2/_5",
"RangeDataset/_3",
"Const/_4",
"__inference_Dataset_map_lambda_10",
"default"),
NDef("dataset",
"_Retval",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}})},
{FunctionDefHelper::Create(
"__inference_Dataset_map_lambda_10",
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"CompressElement"},
"CompressElement",
{"args_0"},
{{"input_types", DT_INT64}}},
{{"Identity"},
"Identity",
{"CompressElement:compressed:0"},
{{"T", DT_VARIANT}}},
},
{})});
RemoveCompressionMap optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("dataset", output);
EXPECT_EQ(output.node(index).input(0), "RangeDataset/_3");
}
TEST(RemoveCompressionMap, FailureNoMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef({NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("dataset",
"_Retval",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}})});
RemoveCompressionMap optimizer;
GraphDef output;
ASSERT_THAT(optimizer.Optimize(nullptr, item, &output),
testing::StatusIs(error::INTERNAL,
HasSubstr("Compression function not found.")));
} |
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_propagation.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
// Attr that opts a functional op (If/Case/While/call) in to Switch/Merge
// lowering.
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
// Attr that requests a function call be inlined as a multi-device function.
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
// Attrs marking nodes claimed by TPU/XLA compilation; such nodes are skipped
// by the lowering pass (see used_by_xla in Run below).
constexpr const char* const kTpuReplicateAttr = "_tpu_replicate";
constexpr const char* const kXlaClusterAttr = "_xla_compile_id";
constexpr const char* const kXlaMustCompileAttr = "_XlaMustCompile";
// True iff node `n` carries `attr_name` as a bool attr whose value is true.
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
  bool value = false;
  if (!TryGetNodeAttr(n->attrs(), attr_name, &value)) return false;
  return value;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
string match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && !match.empty();
}
// Returns true if the node requests lowering to Switch/Merge control flow.
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
// Returns true if the node requests inlining as a multi-device function call.
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
// Returns true if the node is tagged for TPU compilation (_tpu_replicate set
// to a non-empty cluster name).
bool MarkedForTpuCompilation(const Node* n) {
return CheckStringAttr(n, kTpuReplicateAttr);
}
// Returns true if the node is tagged for XLA compilation via either the
// _xla_compile_id string attr or the _XlaMustCompile bool attr.
bool MarkedForXlaCompilation(const Node* n) {
return CheckStringAttr(n, kXlaClusterAttr) ||
CheckBoolAttr(n, kXlaMustCompileAttr);
}
// Returns true if the graph contains any _Arg or _Retval op node, i.e. it is
// a function-body graph rather than a plain top-level graph.
bool HasArgsOrRetvals(const Graph& g) {
  for (const Node* node : g.op_nodes()) {
    if (!node->IsArg() && !node->IsRetval()) continue;
    return true;
  }
  return false;
}
// Pass-through op types onto which device assignments may be propagated
// after lowering. Heap-allocated and intentionally leaked so the set stays
// valid during program teardown.
const absl::flat_hash_set<std::string>& DevicePropagationOpList() {
static const auto op_list = new absl::flat_hash_set<std::string>(
{"Identity", "IdentityN", "Enter", "Exit", "Switch", "Merge",
"NextIteration"});
return *op_list;
}
// Returns true if `device_string` parses as a full device name whose type is
// TPU; only such device assignments are propagated to lowered nodes.
bool IsPropagatableDevice(StringPiece device_string) {
DeviceNameUtils::ParsedName device;
return DeviceNameUtils::ParseFullName(device_string, &device) &&
device.type == DEVICE_TPU;
}
}
// Lowers functional control-flow ops (If/Case/While) into Switch/Merge
// primitives, inlines function call nodes, and then propagates TPU device
// assignments onto the pass-through nodes created by the rewrites.
Status LowerFunctionalOpsPass::Run(
const GraphOptimizationPassOptions& options) {
// A partitioned graph means we are running too late in the pipeline.
if (options.partition_graphs != nullptr) {
return errors::Internal(
"Lowering If/While ops should happen before partitioning.");
}
if (options.graph == nullptr) {
return absl::OkStatus();
}
Graph* g = options.graph->get();
if (g == nullptr) {
return errors::Internal(
"Lowering While op requires a graph to be available.");
}
FunctionLibraryDefinition* flib_def = options.flib_def;
if (flib_def == nullptr) {
return errors::Internal(
"Lowering If op requires a FunctionLibraryDefinition to be available.");
}
// Function-call inlining is opt-in via session optimizer options.
const bool lower_function_calls =
options.session_options && options.session_options->config.graph_options()
.optimizer_options()
.do_function_inlining();
// If the graph has _Arg/_Retval nodes it is itself a function body, so the
// lowered nodes need not remain fetchable.
bool keep_lowered_nodes_fetchable = !HasArgsOrRetvals(*g);
// Executors that handle functional control flow natively skip the
// If/Case/While lowering below; function calls are still inlined.
const bool functional_control_flow =
options.session_options &&
(options.session_options->config.experimental().executor_type() ==
"SINGLE_THREADED_EXECUTOR" ||
options.session_options->config.experimental().use_tfrt() ||
options.session_options->config.experimental()
.disable_functional_ops_lowering());
// Nodes claimed by TPU/XLA compilation are left untouched.
const auto used_by_xla = [](Node* node) -> bool {
return MarkedForTpuCompilation(node) || MarkedForXlaCompilation(node);
};
const auto lower_control_flow = [&](Node* node) -> bool {
return LowerUsingSwitchMergeIsOn(node) && !used_by_xla(node);
};
int num_node_ids_before_lowering = g->num_node_ids();
// Iterate by node id, re-reading num_node_ids() each step so that nodes
// created by earlier rewrites are themselves visited (enabling nested
// lowering). Ids 0 and 1 are skipped — presumably the reserved
// source/sink nodes; confirm against the Graph class documentation.
for (int i = 2; i < g->num_node_ids(); ++i) {
Node* n = g->FindNodeId(i);
if (n == nullptr) continue;
if (IsFunctionCall(*flib_def, *n) && !used_by_xla(n) &&
(lower_function_calls || LowerAsMultiDeviceFunctionIsOn(n))) {
TF_RETURN_IF_ERROR(RewriteFunctionCallNode(n, g, *flib_def,
keep_lowered_nodes_fetchable));
continue;
}
if (functional_control_flow) continue;
if (n->IsIfNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsCaseNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsWhileNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(
RewriteWhileNode(n, g, flib_def, keep_lowered_nodes_fetchable));
} else {
// Having the lowering attr on any other op type is a programming error.
DCHECK(!lower_control_flow(n))
<< "Node " << FormatNodeForError(*n) << " of type "
<< n->type_string() << " has '"
<< LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr
<< "' attr set but it does not support lowering.\n";
}
}
// Propagate TPU devices onto pass-through ops created by the rewrites
// above (only nodes with ids >= the pre-lowering count are new).
PropagateDevices(
[num_node_ids_before_lowering](const Node& n) {
return DevicePropagationOpList().contains(n.type_string()) &&
n.id() >= num_node_ids_before_lowering;
},
IsPropagatableDevice, g);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 10,
LowerFunctionalOpsPass);
} | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef FunctionDefHelper FDH;
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
// Asserts that `s` contains `expected`, with a readable failure message.
static void AssertHasSubstr(StringPiece s, StringPiece expected) {
ASSERT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
// Returns SessionOptions with function inlining enabled — a precondition for
// the lowering pass to rewrite function call nodes.
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
// Runs LowerFunctionalOpsPass over `graph` in place, using a function library
// copied from the graph and inlining-enabled session options.
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
// While-loop condition function: z = (counter < N); `pred` and `x` are
// carried through unused.
FunctionDef WhileWithIfCond(int32_t N) {
const Tensor kN = test::AsScalar<int32>(N);
return FDH::Define(
"WhileWithIfCond",
{"counter: int32", "pred: bool", "x: int32"},
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT32}}},
{{"z"}, "Less", {"counter", "N"}, {{"T", DT_INT32}}},
});
}
// While-loop body function: increments counter and applies a nested If to x
// (XTimesTwo when pred, else XTimesFour). The nested If carries the
// lowering attr so the pass must lower it inside the loop body.
FunctionDef WhileWithIfBody() {
NameAttrList then_func;
then_func.set_name("XTimesTwo");
NameAttrList else_func;
else_func.set_name("XTimesFour");
const Tensor kOne = test::AsScalar<int32>(1);
std::vector<DataType> input_types = {DT_INT32};
std::vector<DataType> output_types = {DT_INT32};
return FDH::Define(
"WhileWithIfBody",
{"counter: int32", "pred: bool", "x: int32"},
{"updated_counter: int32", "pred: bool", "if: int32"},
{},
{
{{"if"},
"If",
{"pred", "x"},
{{"then_branch", then_func},
{"else_branch", else_func},
{"Tcond", DT_BOOL},
{"Tin", input_types},
{"Tout", output_types},
{kLowerUsingSwitchMergeAttr, true}}},
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_INT32}}},
{{"updated_counter"}, "Add", {"counter", "one"}, {{"T", DT_INT32}}},
});
}
// A While loop (3 iterations) whose body contains a lowerable If: after the
// pass no While/If nodes remain, and execution matches the functional
// semantics (x doubled 3 times = 8 when pred, quadrupled 3 times = 64 when
// !pred).
TEST(LowerIfWhileTest, CondInWhile) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::XTimesFour();
*f_lib_proto.add_function() = WhileWithIfCond(3);
*f_lib_proto.add_function() = WhileWithIfBody();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto counter = ops::Placeholder(root.WithOpName("counter"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(counter.node()), NodeBuilder::NodeOut(pred.node()),
NodeBuilder::NodeOut(a.node())});
Node* while_node;
AttrValue cond_func;
cond_func.mutable_func()->set_name("WhileWithIfCond");
AttrValue body_func;
body_func.mutable_func()->set_name("WhileWithIfBody");
TF_ASSERT_OK(NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_BOOL, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
// After lowering, no functional control-flow ops may remain.
for (const auto* op : graph->op_nodes()) {
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
// Output 2 of the While is the loop-carried x.
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 8);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 64);
}
}
// If then-branch function containing a lowerable While loop: doubles x while
// x <= 8. The nested While carries the lowering attr.
FunctionDef IfWithWhileThen() {
NameAttrList cond_func;
cond_func.set_name("LessThanOrEqualToN");
NameAttrList body_func;
body_func.set_name("XTimesTwo");
std::vector<DataType> input_and_output_types = {DT_INT32};
std::vector<TensorShape> output_shapes = {TensorShape()};
return FDH::Define(
"IfWithWhileThen",
{"x: int32"},
{"while: int32"},
{},
{
{{"while"},
"While",
{"x"},
{{"cond", cond_func},
{"body", body_func},
{"T", input_and_output_types},
{"output_shapes", output_shapes},
{kLowerUsingSwitchMergeAttr, true}}},
});
}
// An If whose then-branch contains a lowerable While: before lowering the
// graph has no control-flow primitives and exactly one node named "if";
// after lowering no While/If nodes remain (but the "if" name survives), and
// both branches execute correctly (while-loop doubling to 16, else 2).
TEST(LowerIfWhileTest, WhileInCond) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
*f_lib_proto.add_function() = IfWithWhileThen();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue then_func;
then_func.mutable_func()->set_name("IfWithWhileThen");
AttrValue else_func;
else_func.mutable_func()->set_name("XTimesTwo");
Node* if_node;
TF_ASSERT_OK(NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", then_func)
.Attr("else_branch", else_func)
.Attr("Tout", {DT_INT32})
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &if_node));
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
// Pre-lowering sanity check: no control-flow primitives yet.
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "if") {
node_called_if_count++;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "if") {
node_called_if_count++;
}
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 2);
}
}
}
} | bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
} | TEST(LowerIfWhileTest, CondInWhile) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::XTimesFour();
*f_lib_proto.add_function() = WhileWithIfCond(3);
*f_lib_proto.add_function() = WhileWithIfBody();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto counter = ops::Placeholder(root.WithOpName("counter"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(counter.node()), NodeBuilder::NodeOut(pred.node()),
NodeBuilder::NodeOut(a.node())});
Node* while_node;
AttrValue cond_func;
cond_func.mutable_func()->set_name("WhileWithIfCond");
AttrValue body_func;
body_func.mutable_func()->set_name("WhileWithIfBody");
TF_ASSERT_OK(NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_BOOL, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto* op : graph->op_nodes()) {
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 8);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 64);
}
}
TEST(LowerIfWhileTest, WhileInCond) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
*f_lib_proto.add_function() = IfWithWhileThen();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue then_func;
then_func.mutable_func()->set_name("IfWithWhileThen");
AttrValue else_func;
else_func.mutable_func()->set_name("XTimesTwo");
Node* if_node;
TF_ASSERT_OK(NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", then_func)
.Attr("else_branch", else_func)
.Attr("Tout", {DT_INT32})
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &if_node));
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "if") {
node_called_if_count++;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "if") {
node_called_if_count++;
}
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 2);
}
} |
#include "tsl/profiler/lib/profiler_factory.h"
#include <memory>
#include <utility>
#include <vector>
#include "tsl/platform/mutex.h"
#include "tsl/profiler/lib/profiler_controller.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
namespace tsl {
namespace profiler {
namespace {
mutex mu(LINKER_INITIALIZED);
// Global factory registry. Heap-allocated and intentionally leaked so it
// remains usable during program teardown. Callers must hold `mu`.
std::vector<ProfilerFactory>* GetFactories() {
  static std::vector<ProfilerFactory>* const factories =
      new std::vector<ProfilerFactory>();
  return factories;
}
}
// Appends `factory` to the global registry under the registry mutex.
void RegisterProfilerFactory(ProfilerFactory factory) {
mutex_lock lock(mu);
GetFactories()->push_back(std::move(factory));
}
// Invokes every registered factory with `options`. Factories may decline by
// returning null; each non-null profiler is wrapped in a ProfilerController
// and returned. The registry mutex is held for the whole loop, so factories
// must not re-enter registration.
std::vector<std::unique_ptr<profiler::ProfilerInterface>> CreateProfilers(
const tensorflow::ProfileOptions& options) {
std::vector<std::unique_ptr<profiler::ProfilerInterface>> result;
mutex_lock lock(mu);
for (const auto& factory : *GetFactories()) {
auto profiler = factory(options);
if (profiler == nullptr) continue;
result.emplace_back(
std::make_unique<ProfilerController>(std::move(profiler)));
}
return result;
}
// Test-only helper: empties the global factory registry.
void ClearRegisteredProfilersForTest() {
mutex_lock lock(mu);
GetFactories()->clear();
}
}
} | #include "tsl/profiler/lib/profiler_factory.h"
#include <functional>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
// Minimal ProfilerInterface whose methods all succeed; used only to observe
// factory registration behavior.
class TestProfiler : public ProfilerInterface {
public:
absl::Status Start() override { return absl::OkStatus(); }
absl::Status Stop() override { return absl::OkStatus(); }
absl::Status CollectData(tensorflow::profiler::XSpace*) override {
return absl::OkStatus();
}
};
// Plain-function factory, used to exercise registration via function pointer.
std::unique_ptr<ProfilerInterface> TestFactoryFunction(
const tensorflow::ProfileOptions& options) {
return absl::make_unique<TestProfiler>();
}
// A factory registered as a function pointer yields exactly one profiler.
TEST(ProfilerFactoryTest, FactoryFunctionPointer) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&TestFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
// A factory registered as a capture-less lambda yields exactly one profiler.
TEST(ProfilerFactoryTest, FactoryLambda) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) {
return absl::make_unique<TestProfiler>();
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
// Factory that declines to create a profiler by returning null.
std::unique_ptr<ProfilerInterface> NullFactoryFunction(
const tensorflow::ProfileOptions& options) {
return nullptr;
}
// Null results from factories are dropped, not wrapped.
TEST(ProfilerFactoryTest, FactoryReturnsNull) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&NullFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_TRUE(profilers.empty());
}
// Copyable/movable stateful factory object; ptr_ exists only to make the
// class non-empty so move-capture into the registered lambda is meaningful.
class FactoryClass {
public:
explicit FactoryClass(void* ptr) : ptr_(ptr) {}
FactoryClass(const FactoryClass&) = default;
FactoryClass(FactoryClass&&) = default;
std::unique_ptr<ProfilerInterface> CreateProfiler(
const tensorflow::ProfileOptions& options) const {
return absl::make_unique<TestProfiler>();
}
private:
void* ptr_ TF_ATTRIBUTE_UNUSED = nullptr;
};
// A stateful factory object move-captured into a lambda registers and
// produces exactly one profiler.
TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) {
ClearRegisteredProfilersForTest();
static int token = 42;
FactoryClass factory(&token);
RegisterProfilerFactory([factory = std::move(factory)](
const tensorflow::ProfileOptions& options) {
return factory.CreateProfiler(options);
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
}
}
} | std::vector<std::unique_ptr<profiler::ProfilerInterface>> CreateProfilers(
const tensorflow::ProfileOptions& options) {
std::vector<std::unique_ptr<profiler::ProfilerInterface>> result;
mutex_lock lock(mu);
for (const auto& factory : *GetFactories()) {
auto profiler = factory(options);
if (profiler == nullptr) continue;
result.emplace_back(
std::make_unique<ProfilerController>(std::move(profiler)));
}
return result;
} | TEST(ProfilerFactoryTest, FactoryFunctionPointer) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&TestFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
TEST(ProfilerFactoryTest, FactoryLambda) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory([](const tensorflow::ProfileOptions& options) {
return absl::make_unique<TestProfiler>();
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
}
TEST(ProfilerFactoryTest, FactoryReturnsNull) {
ClearRegisteredProfilersForTest();
RegisterProfilerFactory(&NullFactoryFunction);
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_TRUE(profilers.empty());
}
TEST(ProfilerFactoryTest, FactoryClassCapturedByLambda) {
ClearRegisteredProfilersForTest();
static int token = 42;
FactoryClass factory(&token);
RegisterProfilerFactory([factory = std::move(factory)](
const tensorflow::ProfileOptions& options) {
return factory.CreateProfiler(options);
});
auto profilers = CreateProfilers(tensorflow::ProfileOptions());
EXPECT_EQ(profilers.size(), 1);
} |
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
// Fallback estimate (in ns) used before any real execution-time measurement
// is available for a program.
constexpr int64_t kDefaultEstimateNs = 1;
// Clock source, indirected through a function pointer so tests can install a
// fake clock (see OverwriteNowNsFunctionForTest).
ABSL_CONST_INIT int64_t (*NowNs)() = +[]() -> int64_t {
  return absl::GetCurrentTimeNanos();
};
using DeviceStates = GpuServingDeviceSelector::DeviceStates;
// Constructs a selector tracking `num_devices` devices; device choice is
// delegated to `device_selector_policy`. Request ids start at zero.
GpuServingDeviceSelector::GpuServingDeviceSelector(
    const int num_devices,
    std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy)
    : device_states_(num_devices),
      device_selector_policy_(std::move(device_selector_policy)),
      req_id_counter_(0) {}
// Picks a device for `program_fingerprint` via the configured policy,
// records the enqueue, and returns a reservation that reports back through
// FreeDeviceReservation() on destruction.
tsl::DeviceReservation GpuServingDeviceSelector::ReserveDevice(
    absl::string_view program_fingerprint) {
  absl::MutexLock lock(&mu_);
  // Hand the policy a read-only view of the per-device state.
  DeviceStates snapshot;
  snapshot.states = absl::Span<const DeviceState>(device_states_);
  auto [info_it, inserted] =
      execution_info_.try_emplace(program_fingerprint, ExecutionInfo());
  const int chosen_device =
      device_selector_policy_->SelectDevice(program_fingerprint, snapshot);
  ServingDeviceSelector::EnqueueHelper(device_states_.at(chosen_device),
                                       chosen_device, info_it->second,
                                       program_fingerprint, 0,
                                       req_id_counter_++, 1, 0, NowNs());
  return tsl::DeviceReservation(chosen_device, this);
}
// Called when a tsl::DeviceReservation handed out by ReserveDevice() is
// released; forwards to Completed() for the reserved device.
void GpuServingDeviceSelector::FreeDeviceReservation(
    const tsl::DeviceReservation& reservation) {
  Completed(reservation.device_index());
}
// Records that a program identified by `fingerprint` was queued on device
// `index_on_host`, then refreshes the exported total GPU load metric.
// An empty fingerprint is rejected with an error log.
void GpuServingDeviceSelector::Enqueue(int32_t index_on_host,
                                       absl::string_view fingerprint) {
  if (fingerprint.empty()) {
    LOG(ERROR) << "Empty fingerprint.";
    return;
  }
  absl::MutexLock lock(&mu_);
  auto [exec_it, inserted] =
      execution_info_.try_emplace(fingerprint, ExecutionInfo());
  DeviceState& state = device_states_.at(index_on_host);
  ServingDeviceSelector::EnqueueHelper(state, index_on_host, exec_it->second,
                                       fingerprint, 0, -1, 1, 0, NowNs());
  const int64_t load_ns = TotalEstimatedTimeTillIdleNs();
  GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
      load_ns);
}
// Records completion of work on device `index_on_host` (optionally with an
// error) and refreshes the exported total GPU load metric.
void GpuServingDeviceSelector::Completed(int32_t index_on_host,
                                         bool had_error) {
  absl::MutexLock lock(&mu_);
  DeviceState& state = device_states_.at(index_on_host);
  ServingDeviceSelector::CompletedHelper(state, index_on_host, 0,
                                         min_exec_time_, had_error, NowNs());
  const int64_t load_ns = TotalEstimatedTimeTillIdleNs();
  GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
      load_ns);
}
// Sums the estimated time-till-idle across all tracked devices, falling back
// to kDefaultEstimateNs when no minimum execution-time estimate exists yet.
// Callers must hold mu_ (all call sites in this file do).
int64_t GpuServingDeviceSelector::TotalEstimatedTimeTillIdleNs() {
  int64_t sum_ns = 0;
  for (const auto& state : device_states_) {
    sum_ns += ServingDeviceSelector::EstimateTimeTillIdleNs(
        state, 0, min_exec_time_.value_or(kDefaultEstimateNs), NowNs());
  }
  return sum_ns;
}
// Test hook: replaces the file-local clock with `now_ns`, letting tests
// advance time deterministically.
void GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
    int64_t (*now_ns)()) {
  NowNs = now_ns;
}
}
} | #include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
// RAII helper that installs a fake, manually-advanced clock into
// GpuServingDeviceSelector for the lifetime of a test and restores the real
// clock (absl::GetCurrentTimeNanos) on destruction.
class ServingDeviceSelectorTestHelper {
 public:
  ServingDeviceSelectorTestHelper() {
    GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(NowNs);
    now_ns_ = 0;
  }
  ~ServingDeviceSelectorTestHelper() {
    GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
        absl::GetCurrentTimeNanos);
  }
  // Advances the fake clock by `ns` nanoseconds.
  static void ElapseNs(int64_t ns) { now_ns_ += ns; }
  // Current fake time; this function is what gets installed as the clock.
  static int64_t NowNs() { return now_ns_; }
 private:
  static int64_t now_ns_;  // Fake "current time", shared by all tests.
};
int64_t ServingDeviceSelectorTestHelper::now_ns_ = 0;
namespace {
// With a round-robin policy over two devices, successive reservations must
// alternate device indices 0, 1, 0, ...
TEST(GpuServingDeviceSelector, Basic) {
  GpuServingDeviceSelector selector(2,
                                    std::make_unique<tsl::RoundRobinPolicy>());
  const std::string program_fingerprint = "TensorFlow";
  tsl::DeviceReservation reservation =
      selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 0);
  reservation = selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 1);
  reservation = selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 0);
}
// Drives Enqueue/Completed pairs against a fake clock and checks that the
// exported TotalGpuLoadNs metric tracks the remaining estimated work.
// Fingerprints name their intended duration ("16ms" etc.); the first
// enqueue/complete round teaches the selector each program's execution time.
TEST(GpuServingDeviceSelector, DefaultPolicyOnlyEnqueueCall) {
  ServingDeviceSelectorTestHelper helper;
  auto policy = std::make_unique<tsl::RoundRobinPolicy>();
  auto serving_device_selector =
      std::make_unique<tensorflow::gpu::GpuServingDeviceSelector>(
          4, std::move(policy));
  // Warm-up round: two enqueues per device, completed at the advertised
  // durations so per-program estimates get recorded.
  serving_device_selector->Enqueue(3, "16ms");
  serving_device_selector->Enqueue(2, "8ms");
  serving_device_selector->Enqueue(1, "4ms");
  serving_device_selector->Enqueue(0, "2ms");
  serving_device_selector->Enqueue(3, "16ms");
  serving_device_selector->Enqueue(2, "8ms");
  serving_device_selector->Enqueue(1, "4ms");
  serving_device_selector->Enqueue(0, "2ms");
  helper.ElapseNs(2e6);
  serving_device_selector->Completed(0);
  helper.ElapseNs(2e6);
  serving_device_selector->Completed(0);
  serving_device_selector->Completed(1);
  helper.ElapseNs(4e6);
  serving_device_selector->Completed(1);
  serving_device_selector->Completed(2);
  helper.ElapseNs(8e6);
  serving_device_selector->Completed(2);
  serving_device_selector->Completed(3);
  helper.ElapseNs(16e6);
  serving_device_selector->Completed(3);
  // Second round: each enqueue should raise the load metric by that
  // program's learned execution time.
  serving_device_selector->Enqueue(3, "16ms");
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      16e6);
  serving_device_selector->Enqueue(2, "8ms");
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      24e6);
  serving_device_selector->Enqueue(1, "4ms");
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      28e6);
  serving_device_selector->Enqueue(0, "2ms");
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      30e6);
  // Completions (with elapsed time) should drain the metric back to zero.
  helper.ElapseNs(2e6);
  serving_device_selector->Completed(0);
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      22e6);
  helper.ElapseNs(2e6);
  serving_device_selector->Completed(1);
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      16e6);
  helper.ElapseNs(4e6);
  serving_device_selector->Completed(2);
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      8e6);
  helper.ElapseNs(8e6);
  serving_device_selector->Completed(3);
  EXPECT_EQ(
      GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
      0e6);
}
}
}
} | tsl::DeviceReservation GpuServingDeviceSelector::ReserveDevice(
absl::string_view program_fingerprint) {
absl::MutexLock lock(&mu_);
DeviceStates device_states;
device_states.states = absl::Span<const DeviceState>(device_states_);
auto [it, emplaced] =
execution_info_.try_emplace(program_fingerprint, ExecutionInfo());
const int device_index =
device_selector_policy_->SelectDevice(program_fingerprint, device_states);
ServingDeviceSelector::EnqueueHelper(
device_states_.at(device_index), device_index, it->second,
program_fingerprint, 0, req_id_counter_++,
1, 0, NowNs());
return tsl::DeviceReservation(device_index, this);
} | namespace {
// With a round-robin policy over two devices, successive reservations must
// alternate device indices 0, 1, 0, ...
TEST(GpuServingDeviceSelector, Basic) {
  GpuServingDeviceSelector selector(2,
                                    std::make_unique<tsl::RoundRobinPolicy>());
  const std::string program_fingerprint = "TensorFlow";
  tsl::DeviceReservation reservation =
      selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 0);
  reservation = selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 1);
  reservation = selector.ReserveDevice(program_fingerprint);
  EXPECT_EQ(reservation.device_index(), 0);
}
#ifndef ABSL_STRINGS_CHARSET_H_
#define ABSL_STRINGS_CHARSET_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/strings/string_view.h"
namespace absl {
// A constexpr-friendly set of 8-bit characters, stored as a 256-bit bitmap
// (four uint64_t words). All operations are O(1) and usable in constant
// expressions.
class CharSet {
 public:
  // The empty set.
  constexpr CharSet() : m_() {}
  // The set of all characters occurring in `str` (embedded NULs included,
  // since string_view is length-delimited).
  constexpr explicit CharSet(absl::string_view str) : m_() {
    for (char c : str) {
      SetChar(static_cast<unsigned char>(c));
    }
  }
  // True if `c` is a member. The cast makes negative chars index correctly.
  constexpr bool contains(char c) const {
    return ((m_[static_cast<unsigned char>(c) / 64] >>
             (static_cast<unsigned char>(c) % 64)) &
            0x1) == 0x1;
  }
  // True if no character is a member (all four words are zero).
  constexpr bool empty() const {
    for (uint64_t c : m_) {
      if (c != 0) return false;
    }
    return true;
  }
  // The singleton set {x}.
  static constexpr CharSet Char(char x) {
    return CharSet(CharMaskForWord(x, 0), CharMaskForWord(x, 1),
                   CharMaskForWord(x, 2), CharMaskForWord(x, 3));
  }
  // The closed range [lo, hi], inclusive at both ends.
  static constexpr CharSet Range(char lo, char hi) {
    return CharSet(RangeForWord(lo, hi, 0), RangeForWord(lo, hi, 1),
                   RangeForWord(lo, hi, 2), RangeForWord(lo, hi, 3));
  }
  // Set intersection.
  friend constexpr CharSet operator&(const CharSet& a, const CharSet& b) {
    return CharSet(a.m_[0] & b.m_[0], a.m_[1] & b.m_[1], a.m_[2] & b.m_[2],
                   a.m_[3] & b.m_[3]);
  }
  // Set union.
  friend constexpr CharSet operator|(const CharSet& a, const CharSet& b) {
    return CharSet(a.m_[0] | b.m_[0], a.m_[1] | b.m_[1], a.m_[2] | b.m_[2],
                   a.m_[3] | b.m_[3]);
  }
  // Set complement over all 256 byte values.
  friend constexpr CharSet operator~(const CharSet& a) {
    return CharSet(~a.m_[0], ~a.m_[1], ~a.m_[2], ~a.m_[3]);
  }
  // Named ASCII character classes, mirroring <cctype> predicates.
  static constexpr CharSet AsciiUppercase() { return CharSet::Range('A', 'Z'); }
  static constexpr CharSet AsciiLowercase() { return CharSet::Range('a', 'z'); }
  static constexpr CharSet AsciiDigits() { return CharSet::Range('0', '9'); }
  static constexpr CharSet AsciiAlphabet() {
    return AsciiLowercase() | AsciiUppercase();
  }
  static constexpr CharSet AsciiAlphanumerics() {
    return AsciiDigits() | AsciiAlphabet();
  }
  static constexpr CharSet AsciiHexDigits() {
    return AsciiDigits() | CharSet::Range('A', 'F') | CharSet::Range('a', 'f');
  }
  static constexpr CharSet AsciiPrintable() {
    // Space (0x20) through tilde (0x7e).
    return CharSet::Range(0x20, 0x7e);
  }
  static constexpr CharSet AsciiWhitespace() { return CharSet("\t\n\v\f\r "); }
  static constexpr CharSet AsciiPunctuation() {
    return AsciiPrintable() & ~AsciiWhitespace() & ~AsciiAlphanumerics();
  }
 private:
  // Constructs directly from the four bitmap words.
  constexpr CharSet(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
      : m_{b0, b1, b2, b3} {}
  // Bits of word `word` covering [lo, hi]: computed as [0, hi] minus [0, lo).
  static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) {
    return OpenRangeFromZeroForWord(static_cast<unsigned char>(hi) + 1, word) &
           ~OpenRangeFromZeroForWord(static_cast<unsigned char>(lo), word);
  }
  // Bits of word `word` covering the half-open range [0, upper).
  static constexpr uint64_t OpenRangeFromZeroForWord(uint64_t upper,
                                                     uint64_t word) {
    return (upper <= 64 * word) ? 0
           : (upper >= 64 * (word + 1))
               ? ~static_cast<uint64_t>(0)
               : (~static_cast<uint64_t>(0) >> (64 - upper % 64));
  }
  // The single bit for character `x` if it falls in word `word`, else 0.
  static constexpr uint64_t CharMaskForWord(char x, uint64_t word) {
    return (static_cast<unsigned char>(x) / 64 == word)
               ? (static_cast<uint64_t>(1)
                  << (static_cast<unsigned char>(x) % 64))
               : 0;
  }
  // Marks `c` as a member.
  constexpr void SetChar(unsigned char c) {
    m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
  }
  uint64_t m_[4];  // 256-bit membership bitmap; bit c = "c is in the set".
};
}
#endif | #include "absl/strings/charset.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
namespace {
constexpr absl::CharSet everything_map = ~absl::CharSet();
constexpr absl::CharSet nothing_map = absl::CharSet();
// Exercises construction from string_view (length-delimited, so only the
// first N characters count), membership, intersection, and empty().
TEST(Charmap, AllTests) {
  const absl::CharSet also_nothing_map("");
  EXPECT_TRUE(everything_map.contains('\0'));
  EXPECT_FALSE(nothing_map.contains('\0'));
  EXPECT_FALSE(also_nothing_map.contains('\0'));
  for (unsigned char ch = 1; ch != 0; ++ch) {
    SCOPED_TRACE(ch);
    EXPECT_TRUE(everything_map.contains(ch));
    EXPECT_FALSE(nothing_map.contains(ch));
    EXPECT_FALSE(also_nothing_map.contains(ch));
  }
  // Only the first 5 chars ("&@#@^") are in the view; '!' and '?' are not.
  const absl::CharSet symbols(absl::string_view("&@#@^!@?", 5));
  EXPECT_TRUE(symbols.contains('&'));
  EXPECT_TRUE(symbols.contains('@'));
  EXPECT_TRUE(symbols.contains('#'));
  EXPECT_TRUE(symbols.contains('^'));
  EXPECT_FALSE(symbols.contains('!'));
  EXPECT_FALSE(symbols.contains('?'));
  int cnt = 0;
  for (unsigned char ch = 1; ch != 0; ++ch) cnt += symbols.contains(ch);
  EXPECT_EQ(cnt, 4);
  // lets is built from "^ab" (length 3); lets2 includes an embedded NUL.
  const absl::CharSet lets(absl::string_view("^abcde", 3));
  const absl::CharSet lets2(absl::string_view("fghij\0klmnop", 10));
  const absl::CharSet lets3("fghij\0klmnop");
  EXPECT_TRUE(lets2.contains('k'));
  EXPECT_FALSE(lets3.contains('k'));
  EXPECT_FALSE((symbols & lets).empty());
  EXPECT_TRUE((lets2 & lets).empty());
  EXPECT_FALSE((lets & symbols).empty());
  EXPECT_TRUE((lets & lets2).empty());
  EXPECT_TRUE(nothing_map.empty());
  EXPECT_FALSE(lets.empty());
}
// Returns a string of every byte value that `m` reports as a member, in
// increasing order.
std::string Members(const absl::CharSet& m) {
  std::string members;
  for (int ch = 0; ch < 256; ++ch) {
    if (m.contains(static_cast<char>(ch))) {
      members.push_back(static_cast<char>(ch));
    }
  }
  return members;
}
// Builds the string of all byte values from `lo` to `hi`, inclusive.
// Parameters are unsigned char so the increment is well-defined up to 255.
std::string ClosedRangeString(unsigned char lo, unsigned char hi) {
  std::string range;
  for (;;) {
    range.push_back(static_cast<char>(lo));
    if (lo == hi) {
      break;
    }
    ++lo;
  }
  return range;
}
// Verifies that every CharSet operation is usable in a constant expression
// and produces the expected membership.
TEST(Charmap, Constexpr) {
  constexpr absl::CharSet kEmpty = absl::CharSet();
  EXPECT_EQ(Members(kEmpty), "");
  constexpr absl::CharSet kA = absl::CharSet::Char('A');
  EXPECT_EQ(Members(kA), "A");
  constexpr absl::CharSet kAZ = absl::CharSet::Range('A', 'Z');
  EXPECT_EQ(Members(kAZ), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  constexpr absl::CharSet kIdentifier =
      absl::CharSet::Range('0', '9') | absl::CharSet::Range('A', 'Z') |
      absl::CharSet::Range('a', 'z') | absl::CharSet::Char('_');
  EXPECT_EQ(Members(kIdentifier),
            "0123456789"
            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "_"
            "abcdefghijklmnopqrstuvwxyz");
  constexpr absl::CharSet kAll = ~absl::CharSet();
  for (size_t i = 0; i < 256; ++i) {
    SCOPED_TRACE(i);
    EXPECT_TRUE(kAll.contains(i));
  }
  constexpr absl::CharSet kHello = absl::CharSet("Hello, world!");
  EXPECT_EQ(Members(kHello), " !,Hdelorw");
  constexpr absl::CharSet kABC =
      absl::CharSet::Range('A', 'Z') & ~absl::CharSet::Range('D', 'Z');
  EXPECT_EQ(Members(kABC), "ABC");
  constexpr bool kContainsA = absl::CharSet("abc").contains('a');
  EXPECT_TRUE(kContainsA);
  constexpr bool kContainsD = absl::CharSet("abc").contains('d');
  EXPECT_FALSE(kContainsD);
  constexpr bool kEmptyIsEmpty = absl::CharSet().empty();
  EXPECT_TRUE(kEmptyIsEmpty);
  constexpr bool kNotEmptyIsEmpty = absl::CharSet("abc").empty();
  EXPECT_FALSE(kNotEmptyIsEmpty);
}
// Cross-checks CharSet::Range against a straightforward string of the same
// closed range, for boundary-interesting endpoints (word edges 63/64,
// 127/128, and 255).
TEST(Charmap, Range) {
  std::vector<size_t> poi = {0,   1,   2,   3,   4,   7,   8,   9,   15,
                             16,  17,  30,  31,  32,  33,  63,  64,  65,
                             127, 128, 129, 223, 224, 225, 254, 255};
  for (auto lo = poi.begin(); lo != poi.end(); ++lo) {
    SCOPED_TRACE(*lo);
    for (auto hi = lo; hi != poi.end(); ++hi) {
      SCOPED_TRACE(*hi);
      EXPECT_EQ(Members(absl::CharSet::Range(*lo, *hi)),
                ClosedRangeString(*lo, *hi));
    }
  }
}
// A length-delimited string_view keeps characters past an embedded NUL, so
// the set must contain '\0' as well as the characters after it.
TEST(Charmap, NullByteWithStringView) {
  char characters[5] = {'a', 'b', '\0', 'd', 'x'};
  absl::string_view view(characters, 5);
  absl::CharSet tester(view);
  EXPECT_TRUE(tester.contains('a'));
  EXPECT_TRUE(tester.contains('b'));
  EXPECT_TRUE(tester.contains('\0'));
  EXPECT_TRUE(tester.contains('d'));
  EXPECT_TRUE(tester.contains('x'));
  EXPECT_FALSE(tester.contains('c'));
}
// Each named Ascii* set must agree with the corresponding absl::ascii_is*
// predicate for all 256 byte values.
TEST(CharmapCtype, Match) {
  for (int c = 0; c < 256; ++c) {
    SCOPED_TRACE(c);
    SCOPED_TRACE(static_cast<char>(c));
    EXPECT_EQ(absl::ascii_isupper(c),
              absl::CharSet::AsciiUppercase().contains(c));
    EXPECT_EQ(absl::ascii_islower(c),
              absl::CharSet::AsciiLowercase().contains(c));
    EXPECT_EQ(absl::ascii_isdigit(c), absl::CharSet::AsciiDigits().contains(c));
    EXPECT_EQ(absl::ascii_isalpha(c),
              absl::CharSet::AsciiAlphabet().contains(c));
    EXPECT_EQ(absl::ascii_isalnum(c),
              absl::CharSet::AsciiAlphanumerics().contains(c));
    EXPECT_EQ(absl::ascii_isxdigit(c),
              absl::CharSet::AsciiHexDigits().contains(c));
    EXPECT_EQ(absl::ascii_isprint(c),
              absl::CharSet::AsciiPrintable().contains(c));
    EXPECT_EQ(absl::ascii_isspace(c),
              absl::CharSet::AsciiWhitespace().contains(c));
    EXPECT_EQ(absl::ascii_ispunct(c),
              absl::CharSet::AsciiPunctuation().contains(c));
  }
}
} | static constexpr CharSet AsciiPrintable() {
return CharSet::Range(0x20, 0x7e);
} | TEST(CharmapCtype, Match) {
for (int c = 0; c < 256; ++c) {
SCOPED_TRACE(c);
SCOPED_TRACE(static_cast<char>(c));
EXPECT_EQ(absl::ascii_isupper(c),
absl::CharSet::AsciiUppercase().contains(c));
EXPECT_EQ(absl::ascii_islower(c),
absl::CharSet::AsciiLowercase().contains(c));
EXPECT_EQ(absl::ascii_isdigit(c), absl::CharSet::AsciiDigits().contains(c));
EXPECT_EQ(absl::ascii_isalpha(c),
absl::CharSet::AsciiAlphabet().contains(c));
EXPECT_EQ(absl::ascii_isalnum(c),
absl::CharSet::AsciiAlphanumerics().contains(c));
EXPECT_EQ(absl::ascii_isxdigit(c),
absl::CharSet::AsciiHexDigits().contains(c));
EXPECT_EQ(absl::ascii_isprint(c),
absl::CharSet::AsciiPrintable().contains(c));
EXPECT_EQ(absl::ascii_isspace(c),
absl::CharSet::AsciiWhitespace().contains(c));
EXPECT_EQ(absl::ascii_ispunct(c),
absl::CharSet::AsciiPunctuation().contains(c));
}
} |
#ifndef QUICHE_COMMON_QUICHE_LINKED_HASH_MAP_H_
#define QUICHE_COMMON_QUICHE_LINKED_HASH_MAP_H_
#include <functional>
#include <list>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
template <class Key,
class Value,
class Hash = absl::Hash<Key>,
class Eq = std::equal_to<Key>>
class QuicheLinkedHashMap {
private:
typedef std::list<std::pair<Key, Value>> ListType;
typedef absl::flat_hash_map<Key, typename ListType::iterator, Hash, Eq>
MapType;
public:
typedef typename ListType::iterator iterator;
typedef typename ListType::reverse_iterator reverse_iterator;
typedef typename ListType::const_iterator const_iterator;
typedef typename ListType::const_reverse_iterator const_reverse_iterator;
typedef typename MapType::key_type key_type;
typedef typename ListType::value_type value_type;
typedef typename ListType::size_type size_type;
QuicheLinkedHashMap() = default;
explicit QuicheLinkedHashMap(size_type bucket_count) : map_(bucket_count) {}
QuicheLinkedHashMap(const QuicheLinkedHashMap& other) = delete;
QuicheLinkedHashMap& operator=(const QuicheLinkedHashMap& other) = delete;
QuicheLinkedHashMap(QuicheLinkedHashMap&& other) = default;
QuicheLinkedHashMap& operator=(QuicheLinkedHashMap&& other) = default;
iterator begin() { return list_.begin(); }
const_iterator begin() const { return list_.begin(); }
iterator end() { return list_.end(); }
const_iterator end() const { return list_.end(); }
reverse_iterator rbegin() { return list_.rbegin(); }
const_reverse_iterator rbegin() const { return list_.rbegin(); }
reverse_iterator rend() { return list_.rend(); }
const_reverse_iterator rend() const { return list_.rend(); }
const value_type& front() const { return list_.front(); }
value_type& front() { return list_.front(); }
const value_type& back() const { return list_.back(); }
value_type& back() { return list_.back(); }
void clear() {
map_.clear();
list_.clear();
}
bool empty() const { return list_.empty(); }
void pop_front() { erase(begin()); }
size_type erase(const Key& key) {
typename MapType::iterator found = map_.find(key);
if (found == map_.end()) {
return 0;
}
list_.erase(found->second);
map_.erase(found);
return 1;
}
iterator erase(iterator position) {
typename MapType::iterator found = map_.find(position->first);
QUICHE_CHECK(found->second == position)
<< "Inconsistent iterator for map and list, or the iterator is "
"invalid.";
map_.erase(found);
return list_.erase(position);
}
iterator erase(iterator first, iterator last) {
while (first != last && first != end()) {
first = erase(first);
}
return first;
}
iterator find(const Key& key) {
typename MapType::iterator found = map_.find(key);
if (found == map_.end()) {
return end();
}
return found->second;
}
const_iterator find(const Key& key) const {
typename MapType::const_iterator found = map_.find(key);
if (found == map_.end()) {
return end();
}
return found->second;
}
bool contains(const Key& key) const { return find(key) != end(); }
Value& operator[](const key_type& key) {
return (*((this->insert(std::make_pair(key, Value()))).first)).second;
}
std::pair<iterator, bool> insert(const std::pair<Key, Value>& pair) {
return InsertInternal(pair);
}
std::pair<iterator, bool> insert(std::pair<Key, Value>&& pair) {
return InsertInternal(std::move(pair));
}
size_type size() const { return map_.size(); }
template <typename... Args>
std::pair<iterator, bool> emplace(Args&&... args) {
ListType node_donor;
auto node_pos =
node_donor.emplace(node_donor.end(), std::forward<Args>(args)...);
const auto& k = node_pos->first;
auto ins = map_.insert({k, node_pos});
if (!ins.second) {
return {ins.first->second, false};
}
list_.splice(list_.end(), node_donor, node_pos);
return {ins.first->second, true};
}
void swap(QuicheLinkedHashMap& other) {
map_.swap(other.map_);
list_.swap(other.list_);
}
private:
template <typename U>
std::pair<iterator, bool> InsertInternal(U&& pair) {
auto insert_result = map_.try_emplace(pair.first);
auto map_iter = insert_result.first;
if (!insert_result.second) {
return {map_iter->second, false};
}
auto list_iter = list_.insert(list_.end(), std::forward<U>(pair));
map_iter->second = list_iter;
return {list_iter, true};
}
MapType map_;
ListType list_;
};
}
#endif | #include "quiche/common/quiche_linked_hash_map.h"
#include <memory>
#include <tuple>
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
using testing::Pair;
using testing::Pointee;
using testing::UnorderedElementsAre;
namespace quiche {
namespace test {
// Move construction must transfer all entries intact.
TEST(LinkedHashMapTest, Move) {
  QuicheLinkedHashMap<int, std::unique_ptr<int>> m;
  m[2] = std::make_unique<int>(12);
  m[3] = std::make_unique<int>(13);
  QuicheLinkedHashMap<int, std::unique_ptr<int>> n = std::move(m);
  EXPECT_THAT(n,
              UnorderedElementsAre(Pair(2, Pointee(12)), Pair(3, Pointee(13))));
}
// emplace() must work for move-only mapped types (unique_ptr).
TEST(LinkedHashMapTest, CanEmplaceMoveOnly) {
  QuicheLinkedHashMap<int, std::unique_ptr<int>> m;
  struct Data {
    int k, v;
  };
  const Data data[] = {{1, 123}, {3, 345}, {2, 234}, {4, 456}};
  for (const auto& kv : data) {
    m.emplace(std::piecewise_construct, std::make_tuple(kv.k),
              std::make_tuple(new int{kv.v}));
  }
  EXPECT_TRUE(m.contains(2));
  auto found = m.find(2);
  ASSERT_TRUE(found != m.end());
  EXPECT_EQ(234, *found->second);
}
// A type with all copy/move operations deleted; emplace() must still work
// because the node is constructed in place and spliced, never moved.
struct NoCopy {
  explicit NoCopy(int x) : x(x) {}
  NoCopy(const NoCopy&) = delete;
  NoCopy& operator=(const NoCopy&) = delete;
  NoCopy(NoCopy&&) = delete;
  NoCopy& operator=(NoCopy&&) = delete;
  int x;
};
TEST(LinkedHashMapTest, CanEmplaceNoMoveNoCopy) {
  QuicheLinkedHashMap<int, NoCopy> m;
  struct Data {
    int k, v;
  };
  const Data data[] = {{1, 123}, {3, 345}, {2, 234}, {4, 456}};
  for (const auto& kv : data) {
    m.emplace(std::piecewise_construct, std::make_tuple(kv.k),
              std::make_tuple(kv.v));
  }
  EXPECT_TRUE(m.contains(2));
  auto found = m.find(2);
  ASSERT_TRUE(found != m.end());
  EXPECT_EQ(234, found->second.x);
}
// Dereferencing an iterator yields the stored pair with its key readable.
TEST(LinkedHashMapTest, ConstKeys) {
  QuicheLinkedHashMap<int, int> m;
  m.insert(std::make_pair(1, 2));
  std::pair<int, int>& p = *m.begin();
  EXPECT_EQ(1, p.first);
}
// Forward iteration must visit entries in insertion order (2, 1, 3), not
// key order.
TEST(LinkedHashMapTest, Iteration) {
  QuicheLinkedHashMap<int, int> m;
  EXPECT_TRUE(m.begin() == m.end());
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(3, 13));
  QuicheLinkedHashMap<int, int>::iterator i = m.begin();
  ASSERT_TRUE(m.begin() == i);
  ASSERT_TRUE(m.end() != i);
  EXPECT_EQ(2, i->first);
  EXPECT_EQ(12, i->second);
  ++i;
  ASSERT_TRUE(m.end() != i);
  EXPECT_EQ(1, i->first);
  EXPECT_EQ(11, i->second);
  ++i;
  ASSERT_TRUE(m.end() != i);
  EXPECT_EQ(3, i->first);
  EXPECT_EQ(13, i->second);
  ++i;
  ASSERT_TRUE(m.end() == i);
}
// Reverse iteration must visit entries in reverse insertion order (3, 1, 2).
TEST(LinkedHashMapTest, ReverseIteration) {
  QuicheLinkedHashMap<int, int> m;
  EXPECT_TRUE(m.rbegin() == m.rend());
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(3, 13));
  QuicheLinkedHashMap<int, int>::reverse_iterator i = m.rbegin();
  ASSERT_TRUE(m.rbegin() == i);
  ASSERT_TRUE(m.rend() != i);
  EXPECT_EQ(3, i->first);
  EXPECT_EQ(13, i->second);
  ++i;
  ASSERT_TRUE(m.rend() != i);
  EXPECT_EQ(1, i->first);
  EXPECT_EQ(11, i->second);
  ++i;
  ASSERT_TRUE(m.rend() != i);
  EXPECT_EQ(2, i->first);
  EXPECT_EQ(12, i->second);
  ++i;
  ASSERT_TRUE(m.rend() == i);
}
// clear() empties the map and is idempotent.
TEST(LinkedHashMapTest, Clear) {
  QuicheLinkedHashMap<int, int> m;
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(3, 13));
  ASSERT_EQ(3u, m.size());
  m.clear();
  EXPECT_EQ(0u, m.size());
  m.clear();
  EXPECT_EQ(0u, m.size());
}
// size() tracks each insert and resets on clear().
TEST(LinkedHashMapTest, Size) {
  QuicheLinkedHashMap<int, int> m;
  EXPECT_EQ(0u, m.size());
  m.insert(std::make_pair(2, 12));
  EXPECT_EQ(1u, m.size());
  m.insert(std::make_pair(1, 11));
  EXPECT_EQ(2u, m.size());
  m.insert(std::make_pair(3, 13));
  EXPECT_EQ(3u, m.size());
  m.clear();
  EXPECT_EQ(0u, m.size());
}
// empty() reflects inserts and clear().
TEST(LinkedHashMapTest, Empty) {
  QuicheLinkedHashMap<int, int> m;
  ASSERT_TRUE(m.empty());
  m.insert(std::make_pair(2, 12));
  ASSERT_FALSE(m.empty());
  m.clear();
  ASSERT_TRUE(m.empty());
}
// erase(key) returns 1 when a key is removed and 0 when it is absent.
TEST(LinkedHashMapTest, Erase) {
  QuicheLinkedHashMap<int, int> m;
  ASSERT_EQ(0u, m.size());
  EXPECT_EQ(0u, m.erase(2));
  m.insert(std::make_pair(2, 12));
  ASSERT_EQ(1u, m.size());
  EXPECT_EQ(1u, m.erase(2));
  EXPECT_EQ(0u, m.size());
  EXPECT_EQ(0u, m.erase(2));
  EXPECT_EQ(0u, m.size());
}
// Erasing interior keys must preserve the insertion order of the survivors.
TEST(LinkedHashMapTest, Erase2) {
  QuicheLinkedHashMap<int, int> m;
  ASSERT_EQ(0u, m.size());
  EXPECT_EQ(0u, m.erase(2));
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(3, 13));
  m.insert(std::make_pair(4, 14));
  ASSERT_EQ(4u, m.size());
  EXPECT_EQ(1u, m.erase(1));
  EXPECT_EQ(1u, m.erase(3));
  EXPECT_EQ(2u, m.size());
  QuicheLinkedHashMap<int, int>::iterator it = m.begin();
  ASSERT_TRUE(it != m.end());
  EXPECT_EQ(12, it->second);
  ++it;
  ASSERT_TRUE(it != m.end());
  EXPECT_EQ(14, it->second);
  ++it;
  ASSERT_TRUE(it == m.end());
  EXPECT_EQ(0u, m.erase(1));
  ASSERT_EQ(2u, m.size());
  EXPECT_EQ(1u, m.erase(2));
  EXPECT_EQ(1u, m.erase(4));
  ASSERT_EQ(0u, m.size());
  EXPECT_EQ(0u, m.erase(1));
  ASSERT_EQ(0u, m.size());
}
// Range erase [first, last) removes exactly the half-open range and returns
// the iterator at `last`.
TEST(LinkedHashMapTest, Erase3) {
  QuicheLinkedHashMap<int, int> m;
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(3, 13));
  m.insert(std::make_pair(4, 14));
  QuicheLinkedHashMap<int, int>::iterator it2 = m.find(2);
  QuicheLinkedHashMap<int, int>::iterator it4 = m.find(4);
  EXPECT_EQ(m.erase(it2, it4), m.find(4));
  EXPECT_EQ(2u, m.size());
  QuicheLinkedHashMap<int, int>::iterator it = m.begin();
  ASSERT_TRUE(it != m.end());
  EXPECT_EQ(11, it->second);
  ++it;
  ASSERT_TRUE(it != m.end());
  EXPECT_EQ(14, it->second);
  ++it;
  ASSERT_TRUE(it == m.end());
  EXPECT_EQ(m.erase(m.begin()), m.find(4));
  it = m.begin();
  ASSERT_TRUE(it != m.end());
  EXPECT_EQ(14, it->second);
  ++it;
  ASSERT_TRUE(it == m.end());
}
// insert() reports {iterator, true} for new keys and returns the original
// iterator with false for duplicates, leaving the map unchanged.
TEST(LinkedHashMapTest, Insertion) {
  QuicheLinkedHashMap<int, int> m;
  ASSERT_EQ(0u, m.size());
  std::pair<QuicheLinkedHashMap<int, int>::iterator, bool> result;
  result = m.insert(std::make_pair(2, 12));
  ASSERT_EQ(1u, m.size());
  EXPECT_TRUE(result.second);
  EXPECT_EQ(2, result.first->first);
  EXPECT_EQ(12, result.first->second);
  result = m.insert(std::make_pair(1, 11));
  ASSERT_EQ(2u, m.size());
  EXPECT_TRUE(result.second);
  EXPECT_EQ(1, result.first->first);
  EXPECT_EQ(11, result.first->second);
  result = m.insert(std::make_pair(3, 13));
  QuicheLinkedHashMap<int, int>::iterator result_iterator = result.first;
  ASSERT_EQ(3u, m.size());
  EXPECT_TRUE(result.second);
  EXPECT_EQ(3, result.first->first);
  EXPECT_EQ(13, result.first->second);
  result = m.insert(std::make_pair(3, 13));
  EXPECT_EQ(3u, m.size());
  EXPECT_FALSE(result.second) << "No insertion should have occurred.";
  EXPECT_TRUE(result_iterator == result.first)
      << "Duplicate insertion should have given us the original iterator.";
}
// Convenience: builds a std::pair<int, int> to compare against map entries.
static std::pair<int, int> Pair(int i, int j) { return std::make_pair(i, j); }
// front()/pop_front() operate on the oldest (first-inserted) entry.
TEST(LinkedHashMapTest, Front) {
  QuicheLinkedHashMap<int, int> m;
  m.insert(std::make_pair(2, 12));
  m.insert(std::make_pair(1, 11));
  m.insert(std::make_pair(3, 13));
  EXPECT_EQ(3u, m.size());
  EXPECT_EQ(Pair(2, 12), m.front());
  m.pop_front();
  EXPECT_EQ(2u, m.size());
  EXPECT_EQ(Pair(1, 11), m.front());
  m.pop_front();
  EXPECT_EQ(1u, m.size());
  EXPECT_EQ(Pair(3, 13), m.front());
  m.pop_front();
  EXPECT_TRUE(m.empty());
}
// find() returns end() for missing keys and a usable iterator otherwise.
TEST(LinkedHashMapTest, Find) {
  QuicheLinkedHashMap<int, int> m;
  EXPECT_TRUE(m.end() == m.find(1))
      << "We shouldn't find anything in an empty map.";
  m.insert(std::make_pair(2, 12));
  EXPECT_TRUE(m.end() == m.find(1))
      << "We shouldn't find an element that doesn't exist in the map.";
  std::pair<QuicheLinkedHashMap<int, int>::iterator, bool> result =
      m.insert(std::make_pair(1, 11));
  ASSERT_TRUE(result.second);
  ASSERT_TRUE(m.end() != result.first);
  EXPECT_TRUE(result.first == m.find(1))
      << "We should have found an element we know exists in the map.";
  EXPECT_EQ(11, result.first->second);
  m.insert(std::make_pair(3, 13));
  QuicheLinkedHashMap<int, int>::iterator it = m.find(1);
  ASSERT_TRUE(m.end() != it);
  EXPECT_EQ(11, it->second);
  m.clear();
  EXPECT_TRUE(m.end() == m.find(1))
      << "We shouldn't find anything in a map that we've cleared.";
}
// contains() mirrors find() != end().
TEST(LinkedHashMapTest, Contains) {
  QuicheLinkedHashMap<int, int> m;
  EXPECT_FALSE(m.contains(1)) << "An empty map shouldn't contain anything.";
  m.insert(std::make_pair(2, 12));
  EXPECT_FALSE(m.contains(1))
      << "The map shouldn't contain an element that doesn't exist.";
  m.insert(std::make_pair(1, 11));
  EXPECT_TRUE(m.contains(1))
      << "The map should contain an element that we know exists.";
  m.clear();
  EXPECT_FALSE(m.contains(1))
      << "A map that we've cleared shouldn't contain anything.";
}
// swap() exchanges the full contents of two maps.
TEST(LinkedHashMapTest, Swap) {
  QuicheLinkedHashMap<int, int> m1;
  QuicheLinkedHashMap<int, int> m2;
  m1.insert(std::make_pair(1, 1));
  m1.insert(std::make_pair(2, 2));
  m2.insert(std::make_pair(3, 3));
  ASSERT_EQ(2u, m1.size());
  ASSERT_EQ(1u, m2.size());
  m1.swap(m2);
  ASSERT_EQ(1u, m1.size());
  ASSERT_EQ(2u, m2.size());
}
// A user-supplied hash functor must be honored by the template parameters.
TEST(LinkedHashMapTest, CustomHashAndEquality) {
  struct CustomIntHash {
    size_t operator()(int x) const { return x; }
  };
  QuicheLinkedHashMap<int, int, CustomIntHash> m;
  m.insert(std::make_pair(1, 1));
  EXPECT_TRUE(m.contains(1));
  EXPECT_EQ(1, m[1]);
}
}
} | const_iterator end() const { return list_.end(); } | TEST(LinkedHashMapTest, Iteration) {
QuicheLinkedHashMap<int, int> m;
EXPECT_TRUE(m.begin() == m.end());
m.insert(std::make_pair(2, 12));
m.insert(std::make_pair(1, 11));
m.insert(std::make_pair(3, 13));
QuicheLinkedHashMap<int, int>::iterator i = m.begin();
ASSERT_TRUE(m.begin() == i);
ASSERT_TRUE(m.end() != i);
EXPECT_EQ(2, i->first);
EXPECT_EQ(12, i->second);
++i;
ASSERT_TRUE(m.end() != i);
EXPECT_EQ(1, i->first);
EXPECT_EQ(11, i->second);
++i;
ASSERT_TRUE(m.end() != i);
EXPECT_EQ(3, i->first);
EXPECT_EQ(13, i->second);
++i;
ASSERT_TRUE(m.end() == i);
} |
#include "parser/macro_expr_factory.h"
#include <utility>
#include <vector>
#include "absl/functional/overload.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/constant.h"
#include "common/expr.h"
namespace cel {
// Deep-copies `expr`, dispatching on its concrete kind via absl::visit. Each
// branch rebuilds the node through the factory's New* methods; the node's id
// is regenerated with CopyId(expr), and children are copied recursively.
Expr MacroExprFactory::Copy(const Expr& expr) {
  return absl::visit(
      absl::Overload(
          [this, &expr](const UnspecifiedExpr&) -> Expr {
            return NewUnspecified(CopyId(expr));
          },
          [this, &expr](const Constant& const_expr) -> Expr {
            return NewConst(CopyId(expr), const_expr);
          },
          [this, &expr](const IdentExpr& ident_expr) -> Expr {
            return NewIdent(CopyId(expr), ident_expr.name());
          },
          [this, &expr](const SelectExpr& select_expr) -> Expr {
            // test_only selects (`has(x.f)`) become presence tests; plain
            // selects become field accesses.
            const auto id = CopyId(expr);
            return select_expr.test_only()
                       ? NewPresenceTest(id, Copy(select_expr.operand()),
                                         select_expr.field())
                       : NewSelect(id, Copy(select_expr.operand()),
                                   select_expr.field());
          },
          [this, &expr](const CallExpr& call_expr) -> Expr {
            const auto id = CopyId(expr);
            // The receiver is only present for member-style calls.
            absl::optional<Expr> target;
            if (call_expr.has_target()) {
              target = Copy(call_expr.target());
            }
            std::vector<Expr> args;
            args.reserve(call_expr.args().size());
            for (const auto& arg : call_expr.args()) {
              args.push_back(Copy(arg));
            }
            return target.has_value()
                       ? NewMemberCall(id, call_expr.function(),
                                       std::move(*target), std::move(args))
                       : NewCall(id, call_expr.function(), std::move(args));
          },
          [this, &expr](const ListExpr& list_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<ListExprElement> elements;
            elements.reserve(list_expr.elements().size());
            for (const auto& element : list_expr.elements()) {
              elements.push_back(Copy(element));
            }
            return NewList(id, std::move(elements));
          },
          [this, &expr](const StructExpr& struct_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<StructExprField> fields;
            fields.reserve(struct_expr.fields().size());
            for (const auto& field : struct_expr.fields()) {
              fields.push_back(Copy(field));
            }
            return NewStruct(id, struct_expr.name(), std::move(fields));
          },
          [this, &expr](const MapExpr& map_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<MapExprEntry> entries;
            entries.reserve(map_expr.entries().size());
            for (const auto& entry : map_expr.entries()) {
              entries.push_back(Copy(entry));
            }
            return NewMap(id, std::move(entries));
          },
          [this, &expr](const ComprehensionExpr& comprehension_expr) -> Expr {
            // All five sub-expressions are copied before reassembly.
            const auto id = CopyId(expr);
            auto iter_range = Copy(comprehension_expr.iter_range());
            auto accu_init = Copy(comprehension_expr.accu_init());
            auto loop_condition = Copy(comprehension_expr.loop_condition());
            auto loop_step = Copy(comprehension_expr.loop_step());
            auto result = Copy(comprehension_expr.result());
            return NewComprehension(
                id, comprehension_expr.iter_var(), std::move(iter_range),
                comprehension_expr.accu_var(), std::move(accu_init),
                std::move(loop_condition), std::move(loop_step),
                std::move(result));
          }),
      expr.kind());
}
ListExprElement MacroExprFactory::Copy(const ListExprElement& element) {
return NewListElement(Copy(element.expr()), element.optional());
}
StructExprField MacroExprFactory::Copy(const StructExprField& field) {
auto field_id = CopyId(field.id());
auto field_value = Copy(field.value());
return NewStructField(field_id, field.name(), std::move(field_value),
field.optional());
}
MapExprEntry MacroExprFactory::Copy(const MapExprEntry& entry) {
auto entry_id = CopyId(entry.id());
auto entry_key = Copy(entry.key());
auto entry_value = Copy(entry.value());
return NewMapEntry(entry_id, std::move(entry_key), std::move(entry_value),
entry.optional());
}
} | #include "parser/macro_expr_factory.h"
#include <cstdint>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/expr.h"
#include "common/expr_factory.h"
#include "internal/testing.h"
namespace cel {
class TestMacroExprFactory final : public MacroExprFactory {
public:
TestMacroExprFactory() : MacroExprFactory() {}
ExprId id() const { return id_; }
Expr ReportError(absl::string_view) override {
return NewUnspecified(NextId());
}
Expr ReportErrorAt(const Expr&, absl::string_view) override {
return NewUnspecified(NextId());
}
using MacroExprFactory::NewBoolConst;
using MacroExprFactory::NewCall;
using MacroExprFactory::NewComprehension;
using MacroExprFactory::NewIdent;
using MacroExprFactory::NewList;
using MacroExprFactory::NewListElement;
using MacroExprFactory::NewMap;
using MacroExprFactory::NewMapEntry;
using MacroExprFactory::NewMemberCall;
using MacroExprFactory::NewSelect;
using MacroExprFactory::NewStruct;
using MacroExprFactory::NewStructField;
using MacroExprFactory::NewUnspecified;
protected:
ExprId NextId() override { return id_++; }
ExprId CopyId(ExprId id) override {
if (id == 0) {
return 0;
}
return NextId();
}
private:
int64_t id_ = 1;
};
namespace {
TEST(MacroExprFactory, CopyUnspecified) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewUnspecified()), factory.NewUnspecified(2));
}
TEST(MacroExprFactory, CopyIdent) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewIdent("foo")), factory.NewIdent(2, "foo"));
}
TEST(MacroExprFactory, CopyConst) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewBoolConst(true)),
factory.NewBoolConst(2, true));
}
TEST(MacroExprFactory, CopySelect) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewSelect(factory.NewIdent("foo"), "bar")),
factory.NewSelect(3, factory.NewIdent(4, "foo"), "bar"));
}
TEST(MacroExprFactory, CopyCall) {
TestMacroExprFactory factory;
std::vector<Expr> copied_args;
copied_args.reserve(1);
copied_args.push_back(factory.NewIdent(6, "baz"));
EXPECT_EQ(factory.Copy(factory.NewMemberCall("bar", factory.NewIdent("foo"),
factory.NewIdent("baz"))),
factory.NewMemberCall(4, "bar", factory.NewIdent(5, "foo"),
absl::MakeSpan(copied_args)));
}
TEST(MacroExprFactory, CopyList) {
TestMacroExprFactory factory;
std::vector<ListExprElement> copied_elements;
copied_elements.reserve(1);
copied_elements.push_back(factory.NewListElement(factory.NewIdent(4, "foo")));
EXPECT_EQ(factory.Copy(factory.NewList(
factory.NewListElement(factory.NewIdent("foo")))),
factory.NewList(3, absl::MakeSpan(copied_elements)));
}
TEST(MacroExprFactory, CopyStruct) {
TestMacroExprFactory factory;
std::vector<StructExprField> copied_fields;
copied_fields.reserve(1);
copied_fields.push_back(
factory.NewStructField(5, "bar", factory.NewIdent(6, "baz")));
EXPECT_EQ(factory.Copy(factory.NewStruct(
"foo", factory.NewStructField("bar", factory.NewIdent("baz")))),
factory.NewStruct(4, "foo", absl::MakeSpan(copied_fields)));
}
TEST(MacroExprFactory, CopyMap) {
TestMacroExprFactory factory;
std::vector<MapExprEntry> copied_entries;
copied_entries.reserve(1);
copied_entries.push_back(factory.NewMapEntry(6, factory.NewIdent(7, "bar"),
factory.NewIdent(8, "baz")));
EXPECT_EQ(factory.Copy(factory.NewMap(factory.NewMapEntry(
factory.NewIdent("bar"), factory.NewIdent("baz")))),
factory.NewMap(5, absl::MakeSpan(copied_entries)));
}
TEST(MacroExprFactory, CopyComprehension) {
TestMacroExprFactory factory;
EXPECT_EQ(
factory.Copy(factory.NewComprehension(
"foo", factory.NewList(), "bar", factory.NewBoolConst(true),
factory.NewIdent("baz"), factory.NewIdent("foo"),
factory.NewIdent("bar"))),
factory.NewComprehension(
7, "foo", factory.NewList(8, std::vector<ListExprElement>()), "bar",
factory.NewBoolConst(9, true), factory.NewIdent(10, "baz"),
factory.NewIdent(11, "foo"), factory.NewIdent(12, "bar")));
}
}
} | #include "parser/macro_expr_factory.h"
#include <utility>
#include <vector>
#include "absl/functional/overload.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/constant.h"
#include "common/expr.h"
namespace cel {
Expr MacroExprFactory::Copy(const Expr& expr) {
return absl::visit(
absl::Overload(
[this, &expr](const UnspecifiedExpr&) -> Expr {
return NewUnspecified(CopyId(expr));
},
[this, &expr](const Constant& const_expr) -> Expr {
return NewConst(CopyId(expr), const_expr);
},
[this, &expr](const IdentExpr& ident_expr) -> Expr {
return NewIdent(CopyId(expr), ident_expr.name());
},
[this, &expr](const SelectExpr& select_expr) -> Expr {
const auto id = CopyId(expr);
return select_expr.test_only()
? NewPresenceTest(id, Copy(select_expr.operand()),
select_expr.field())
: NewSelect(id, Copy(select_expr.operand()),
select_expr.field());
},
[this, &expr](const CallExpr& call_expr) -> Expr {
const auto id = CopyId(expr);
absl::optional<Expr> target;
if (call_expr.has_target()) {
target = Copy(call_expr.target());
}
std::vector<Expr> args;
args.reserve(call_expr.args().size());
for (const auto& arg : call_expr.args()) {
args.push_back(Copy(arg));
}
return target.has_value()
? NewMemberCall(id, call_expr.function(),
std::move(*target), std::move(args))
: NewCall(id, call_expr.function(), std::move(args));
},
[this, &expr](const ListExpr& list_expr) -> Expr {
const auto id = CopyId(expr);
std::vector<ListExprElement> elements;
elements.reserve(list_expr.elements().size());
for (const auto& element : list_expr.elements()) {
elements.push_back(Copy(element));
}
return NewList(id, std::move(elements));
},
[this, &expr](const StructExpr& struct_expr) -> Expr {
const auto id = CopyId(expr);
std::vector<StructExprField> fields;
fields.reserve(struct_expr.fields().size());
for (const auto& field : struct_expr.fields()) {
fields.push_back(Copy(field));
}
return NewStruct(id, struct_expr.name(), std::move(fields));
},
[this, &expr](const MapExpr& map_expr) -> Expr {
const auto id = CopyId(expr);
std::vector<MapExprEntry> entries;
entries.reserve(map_expr.entries().size());
for (const auto& entry : map_expr.entries()) {
entries.push_back(Copy(entry));
}
return NewMap(id, std::move(entries));
},
[this, &expr](const ComprehensionExpr& comprehension_expr) -> Expr {
const auto id = CopyId(expr);
auto iter_range = Copy(comprehension_expr.iter_range());
auto accu_init = Copy(comprehension_expr.accu_init());
auto loop_condition = Copy(comprehension_expr.loop_condition());
auto loop_step = Copy(comprehension_expr.loop_step());
auto result = Copy(comprehension_expr.result());
return NewComprehension(
id, comprehension_expr.iter_var(), std::move(iter_range),
comprehension_expr.accu_var(), std::move(accu_init),
std::move(loop_condition), std::move(loop_step),
std::move(result));
}),
expr.kind());
} | namespace {
TEST(MacroExprFactory, CopyUnspecified) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewUnspecified()), factory.NewUnspecified(2));
}
TEST(MacroExprFactory, CopyIdent) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewIdent("foo")), factory.NewIdent(2, "foo"));
}
TEST(MacroExprFactory, CopyConst) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewBoolConst(true)),
factory.NewBoolConst(2, true));
}
TEST(MacroExprFactory, CopySelect) {
TestMacroExprFactory factory;
EXPECT_EQ(factory.Copy(factory.NewSelect(factory.NewIdent("foo"), "bar")),
factory.NewSelect(3, factory.NewIdent(4, "foo"), "bar"));
}
TEST(MacroExprFactory, CopyCall) {
TestMacroExprFactory factory;
std::vector<Expr> copied_args;
copied_args.reserve(1);
copied_args.push_back(factory.NewIdent(6, "baz"));
EXPECT_EQ(factory.Copy(factory.NewMemberCall("bar", factory.NewIdent("foo"),
factory.NewIdent("baz"))),
factory.NewMemberCall(4, "bar", factory.NewIdent(5, "foo"),
absl::MakeSpan(copied_args)));
}
TEST(MacroExprFactory, CopyList) {
TestMacroExprFactory factory;
std::vector<ListExprElement> copied_elements;
copied_elements.reserve(1);
copied_elements.push_back(factory.NewListElement(factory.NewIdent(4, "foo")));
EXPECT_EQ(factory.Copy(factory.NewList(
factory.NewListElement(factory.NewIdent("foo")))),
factory.NewList(3, absl::MakeSpan(copied_elements)));
}
TEST(MacroExprFactory, CopyStruct) {
TestMacroExprFactory factory;
std::vector<StructExprField> copied_fields;
copied_fields.reserve(1);
copied_fields.push_back(
factory.NewStructField(5, "bar", factory.NewIdent(6, "baz")));
EXPECT_EQ(factory.Copy(factory.NewStruct(
"foo", factory.NewStructField("bar", factory.NewIdent("baz")))),
factory.NewStruct(4, "foo", absl::MakeSpan(copied_fields)));
}
TEST(MacroExprFactory, CopyMap) {
TestMacroExprFactory factory;
std::vector<MapExprEntry> copied_entries;
copied_entries.reserve(1);
copied_entries.push_back(factory.NewMapEntry(6, factory.NewIdent(7, "bar"),
factory.NewIdent(8, "baz")));
EXPECT_EQ(factory.Copy(factory.NewMap(factory.NewMapEntry(
factory.NewIdent("bar"), factory.NewIdent("baz")))),
factory.NewMap(5, absl::MakeSpan(copied_entries)));
}
TEST(MacroExprFactory, CopyComprehension) {
TestMacroExprFactory factory;
EXPECT_EQ(
factory.Copy(factory.NewComprehension(
"foo", factory.NewList(), "bar", factory.NewBoolConst(true),
factory.NewIdent("baz"), factory.NewIdent("foo"),
factory.NewIdent("bar"))),
factory.NewComprehension(
7, "foo", factory.NewList(8, std::vector<ListExprElement>()), "bar",
factory.NewBoolConst(9, true), factory.NewIdent(10, "baz"),
factory.NewIdent(11, "foo"), factory.NewIdent(12, "bar")));
} |
#ifndef TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#include <stddef.h>
#include <functional>
#include <iosfwd>
#include <ostream>
#include <unordered_map>
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
template <typename IntTypeName, typename _ValueType>
class IntType;
#define TSL_LIB_GTL_DEFINE_INT_TYPE(int_type_name, value_type) \
struct int_type_name##_tag_ {}; \
typedef ::tsl::gtl::IntType<int_type_name##_tag_, value_type> int_type_name;
template <typename IntTypeName, typename _ValueType>
class IntType {
public:
typedef _ValueType ValueType;
typedef IntType<IntTypeName, ValueType> ThisType;
struct Hasher {
size_t operator()(const IntType& arg) const {
return static_cast<size_t>(arg.value());
}
};
template <typename H>
friend H AbslHashValue(H h, const IntType& i) {
return H::combine(std::move(h), i.value());
}
public:
constexpr IntType() : value_(0) {}
constexpr explicit IntType(ValueType value) : value_(value) {}
constexpr ValueType value() const { return value_; }
template <typename ValType>
constexpr ValType value() const {
return static_cast<ValType>(value_);
}
ThisType& operator++() {
++value_;
return *this;
}
const ThisType operator++(int v) {
ThisType temp(*this);
++value_;
return temp;
}
ThisType& operator--() {
--value_;
return *this;
}
const ThisType operator--(int v) {
ThisType temp(*this);
--value_;
return temp;
}
constexpr bool operator!() const { return value_ == 0; }
constexpr const ThisType operator+() const { return ThisType(value_); }
constexpr const ThisType operator-() const { return ThisType(-value_); }
constexpr const ThisType operator~() const { return ThisType(~value_); }
#define INT_TYPE_ASSIGNMENT_OP(op) \
ThisType& operator op(const ThisType& arg_value) { \
value_ op arg_value.value(); \
return *this; \
} \
ThisType& operator op(ValueType arg_value) { \
value_ op arg_value; \
return *this; \
}
INT_TYPE_ASSIGNMENT_OP(+=);
INT_TYPE_ASSIGNMENT_OP(-=);
INT_TYPE_ASSIGNMENT_OP(*=);
INT_TYPE_ASSIGNMENT_OP(/=);
INT_TYPE_ASSIGNMENT_OP(<<=);
INT_TYPE_ASSIGNMENT_OP(>>=);
INT_TYPE_ASSIGNMENT_OP(%=);
#undef INT_TYPE_ASSIGNMENT_OP
ThisType& operator=(ValueType arg_value) {
value_ = arg_value;
return *this;
}
private:
ValueType value_;
static_assert(std::is_integral<ValueType>::value, "invalid integer type");
} TF_PACKED;
template <typename IntTypeName, typename ValueType>
std::ostream& operator<<(std::ostream& os,
IntType<IntTypeName, ValueType> arg) {
return os << arg.value();
}
#define INT_TYPE_ARITHMETIC_OP(op) \
template <typename IntTypeName, typename ValueType> \
static inline constexpr IntType<IntTypeName, ValueType> operator op( \
IntType<IntTypeName, ValueType> id_1, \
IntType<IntTypeName, ValueType> id_2) { \
return IntType<IntTypeName, ValueType>(id_1.value() op id_2.value()); \
} \
template <typename IntTypeName, typename ValueType> \
static inline constexpr IntType<IntTypeName, ValueType> operator op( \
IntType<IntTypeName, ValueType> id, \
typename IntType<IntTypeName, ValueType>::ValueType arg_val) { \
return IntType<IntTypeName, ValueType>(id.value() op arg_val); \
} \
template <typename IntTypeName, typename ValueType> \
static inline constexpr IntType<IntTypeName, ValueType> operator op( \
typename IntType<IntTypeName, ValueType>::ValueType arg_val, \
IntType<IntTypeName, ValueType> id) { \
return IntType<IntTypeName, ValueType>(arg_val op id.value()); \
}
INT_TYPE_ARITHMETIC_OP(+);
INT_TYPE_ARITHMETIC_OP(-);
INT_TYPE_ARITHMETIC_OP(*);
INT_TYPE_ARITHMETIC_OP(/);
INT_TYPE_ARITHMETIC_OP(<<);
INT_TYPE_ARITHMETIC_OP(>>);
INT_TYPE_ARITHMETIC_OP(%);
#undef INT_TYPE_ARITHMETIC_OP
#define INT_TYPE_COMPARISON_OP(op) \
template <typename IntTypeName, typename ValueType> \
static inline constexpr bool operator op( \
IntType<IntTypeName, ValueType> id_1, \
IntType<IntTypeName, ValueType> id_2) { \
return id_1.value() op id_2.value(); \
} \
template <typename IntTypeName, typename ValueType> \
static inline constexpr bool operator op( \
IntType<IntTypeName, ValueType> id, \
typename IntType<IntTypeName, ValueType>::ValueType val) { \
return id.value() op val; \
} \
template <typename IntTypeName, typename ValueType> \
static inline constexpr bool operator op( \
typename IntType<IntTypeName, ValueType>::ValueType val, \
IntType<IntTypeName, ValueType> id) { \
return val op id.value(); \
}
INT_TYPE_COMPARISON_OP(==);
INT_TYPE_COMPARISON_OP(!=);
INT_TYPE_COMPARISON_OP(<);
INT_TYPE_COMPARISON_OP(<=);
INT_TYPE_COMPARISON_OP(>);
INT_TYPE_COMPARISON_OP(>=);
#undef INT_TYPE_COMPARISON_OP
}
}
#endif | #include "tsl/lib/gtl/int_type.h"
#include <memory>
#include <unordered_map>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TSL_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64_t);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
TSL_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long);
template <typename IntType_Type>
class IntTypeTest : public ::testing::Test {};
typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
Int64_IT, UInt64_IT, Long_IT>
SupportedIntTypes;
TYPED_TEST_SUITE(IntTypeTest, SupportedIntTypes);
TYPED_TEST(IntTypeTest, TestInitialization) {
constexpr TypeParam a;
constexpr TypeParam b(1);
constexpr TypeParam c(b);
EXPECT_EQ(0, a);
EXPECT_EQ(1, b);
EXPECT_EQ(1, c);
}
TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
}
TYPED_TEST(IntTypeTest, TestHashFunctor) {
std::unordered_map<TypeParam, char, typename TypeParam::Hasher> map;
TypeParam a(0);
map[a] = 'c';
EXPECT_EQ('c', map[a]);
map[++a] = 'o';
EXPECT_EQ('o', map[a]);
TypeParam b(a);
EXPECT_EQ(typename TypeParam::Hasher()(a), typename TypeParam::Hasher()(b));
}
TYPED_TEST(IntTypeTest, TestValueAccessor) {
constexpr typename TypeParam::ValueType i = -1;
constexpr TypeParam int_type(i);
EXPECT_EQ(i, int_type.value());
static_assert(int_type.value() == i, "value() failed");
EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
EXPECT_EQ(static_cast<int64_t>(i), int_type.template value<int64_t>());
EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
EXPECT_EQ(static_cast<long>(i), int_type.template value<long>());
static_assert(int_type.template value<int>() == static_cast<int>(i),
"value<Value>() failed");
}
TYPED_TEST(IntTypeTest, TestMove) {
struct NotCopyable {
TypeParam inttype;
std::unique_ptr<int> ptr;
static NotCopyable Make(int i) {
NotCopyable f;
f.inttype = TypeParam(i);
f.ptr.reset(new int(i));
return f;
}
};
NotCopyable foo = NotCopyable::Make(123);
EXPECT_EQ(123, foo.inttype);
EXPECT_EQ(123, *foo.ptr);
foo = NotCopyable::Make(321);
EXPECT_EQ(321, foo.inttype);
EXPECT_EQ(321, *foo.ptr);
}
} | constexpr const ThisType operator~() const { return ThisType(~value_); } | TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
} |
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include <unordered_map>
#include <unordered_set>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
// Resolves the function referenced by attr `func_attr_name` on `node` and
// returns its instantiated body through `fbody`. The FunctionBody is owned by
// `flib_runtime`; callers must not free it.
Status GetFunctionBody(FunctionLibraryRuntime* flib_runtime,
                       const NodeDef& node, StringPiece func_attr_name,
                       const FunctionBody** fbody) {
  NameAttrList func;
  TF_RETURN_IF_ERROR(GetNodeAttr(node, func_attr_name, &func));
  FunctionLibraryRuntime::Handle handle;
  TF_RETURN_IF_ERROR(
      flib_runtime->Instantiate(func.name(), AttrSlice(&func.attr()), &handle));
  *fbody = flib_runtime->GetFunctionBody(handle);
  return absl::OkStatus();
}
// Resolves the list of functions referenced by attr `func_list_attr_name` on
// `node` and appends each instantiated body to `fbodies`, preserving order.
// The FunctionBody objects are owned by `flib_runtime`.
Status GetFunctionBodies(FunctionLibraryRuntime* flib_runtime,
                         const NodeDef& node, StringPiece func_list_attr_name,
                         std::vector<const FunctionBody*>* fbodies) {
  std::vector<NameAttrList> funcs;
  TF_RETURN_IF_ERROR(GetNodeAttr(node, func_list_attr_name, &funcs));
  for (const NameAttrList& func : funcs) {
    FunctionLibraryRuntime::Handle handle;
    TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
        func.name(), AttrSlice(&func.attr()), &handle));
    fbodies->push_back(flib_runtime->GetFunctionBody(handle));
  }
  return absl::OkStatus();
}
// Determines which inputs of a multi-branch op (If / Case) must be
// compile-time constants, by running backwards const-analysis over every
// branch body and OR-ing the per-argument results. Branch argument i maps to
// op input i + 1, since op input 0 is the predicate / branch selector.
Status CondConstInputIndices(
    absl::Span<const FunctionBody* const> branch_bodies,
    std::vector<int>* const_input_idxs, FunctionLibraryRuntime* flib_runtime) {
  TF_RET_CHECK(!branch_bodies.empty());
  TF_RET_CHECK(branch_bodies[0] != nullptr);
  const int num_args =
      branch_bodies[0]->record->fdef().signature().input_arg_size();
  // Accumulates, across all branches, which arguments must be constant.
  std::vector<bool> compile_time_const_arg_indices(num_args);
  for (const FunctionBody* branch : branch_bodies) {
    TF_RET_CHECK(branch != nullptr);
    TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
        *(branch->graph), &compile_time_const_arg_indices,
        nullptr, flib_runtime));
  }
  const int num_flags = compile_time_const_arg_indices.size();
  for (int arg_idx = 0; arg_idx < num_flags; ++arg_idx) {
    if (compile_time_const_arg_indices[arg_idx]) {
      // +1 skips over the predicate/branch-index input of the op.
      const_input_idxs->push_back(arg_idx + 1);
    }
  }
  return absl::OkStatus();
}
// Computes the indices of `node`'s inputs that must be compile-time constants
// and appends them to `const_input_idxs`. Functional control-flow ops
// (While/If/Case/PartitionedCall) are handled by recursively analyzing their
// function bodies; all other ops defer to the XlaOpRegistry. Exactly one of
// `op_kernel` / `op_def` may be null; the non-null one selects which registry
// lookup is used for the fallback case.
Status GetCompileTimeConstInputs(const NodeDef& node, const OpKernel* op_kernel,
                                 const OpDef* op_def,
                                 std::vector<int>* const_input_idxs,
                                 FunctionLibraryRuntime* flib_runtime) {
  DCHECK(op_def != nullptr || op_kernel != nullptr);
  if (node.op() == "While" || node.op() == "StatelessWhile") {
    // While loops: an input must be const if either the condition or the body
    // requires the corresponding argument to be const.
    const FunctionBody* fcond = nullptr;
    const FunctionBody* fbody = nullptr;
    TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "cond", &fcond));
    TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "body", &fbody));
    TF_RET_CHECK(fcond);
    TF_RET_CHECK(fbody);
    int num_inputs = fbody->record->fdef().signature().input_arg_size();
    // Both analyses accumulate into the same vector (logical OR of the
    // per-argument const requirements).
    std::vector<bool> compile_time_const_arg_indices(num_inputs);
    TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
        *(fcond->graph), &compile_time_const_arg_indices,
        nullptr, flib_runtime));
    TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
        *(fbody->graph), &compile_time_const_arg_indices,
        nullptr, flib_runtime));
    for (int i = 0; i < num_inputs; i++) {
      if (compile_time_const_arg_indices[i]) {
        // A const-required loop input is only usable as a compile-time
        // constant if the loop never changes it (loop invariant); otherwise
        // we log a warning instead of marking it.
        TF_ASSIGN_OR_RETURN(
            bool is_loop_invariant,
            IsLoopInvariant(fbody, i,
                            flib_runtime->GetFunctionLibraryDefinition()));
        if (is_loop_invariant) {
          const_input_idxs->push_back(i);
        } else {
          Node* arg_i = fbody->arg_nodes[i];
          Node* ret_i = fbody->ret_nodes[i];
          VLOG(1) << "Argument " << i << " to while-loop " << node.name()
                  << " has to be constant, but it's not a loop invariant, "
                     "cluster compilation likely to fail at compile time: "
                  << arg_i->DebugString() << " vs. " << ret_i->DebugString();
          VLOG(1) << node.ShortDebugString();
        }
      }
    }
    return absl::OkStatus();
  } else if (node.op() == "If" || node.op() == "StatelessIf") {
    // Conditionals: union of the then/else branches' const requirements,
    // shifted by one for the predicate input (see CondConstInputIndices).
    const FunctionBody* fthen = nullptr;
    const FunctionBody* felse = nullptr;
    TF_RETURN_IF_ERROR(
        GetFunctionBody(flib_runtime, node, "then_branch", &fthen));
    TF_RETURN_IF_ERROR(
        GetFunctionBody(flib_runtime, node, "else_branch", &felse));
    return CondConstInputIndices({fthen, felse}, const_input_idxs,
                                 flib_runtime);
  } else if (node.op() == "Case" || node.op() == "StatelessCase") {
    // Case: same as If but over an arbitrary number of branches.
    std::vector<const FunctionBody*> branch_bodies;
    TF_RETURN_IF_ERROR(
        GetFunctionBodies(flib_runtime, node, "branches", &branch_bodies));
    return CondConstInputIndices(branch_bodies, const_input_idxs, flib_runtime);
  } else if (node.op() == "PartitionedCall" ||
             node.op() == "StatefulPartitionedCall") {
    // Function calls: call inputs map 1:1 onto the callee's arguments, so no
    // index shift is needed here.
    const FunctionBody* fbody;
    TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "f", &fbody));
    int num_inputs = fbody->record->fdef().signature().input_arg_size();
    std::vector<bool> compile_time_const_arg_indices(num_inputs);
    TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
        *(fbody->graph), &compile_time_const_arg_indices,
        nullptr, flib_runtime));
    for (int i = 0; i < num_inputs; i++) {
      if (compile_time_const_arg_indices[i]) {
        const_input_idxs->push_back(i);
      }
    }
    return absl::OkStatus();
  } else if (op_def != nullptr) {
    // Non-functional op with an OpDef: consult the static registry metadata.
    return XlaOpRegistry::CompileTimeConstantInputs(node, *op_def,
                                                    const_input_idxs);
  } else {
    // Fallback: use the instantiated kernel to look up const inputs.
    return XlaOpRegistry::CompileTimeConstantInputs(*op_kernel,
                                                    const_input_idxs);
  }
}
// Overload for a graph Node: dispatches on the node's NodeDef and its
// registered OpDef (no instantiated kernel is available here).
Status GetCompileTimeConstInputs(const Node* node,
                                 std::vector<int>* const_input_idxs,
                                 FunctionLibraryRuntime* flib_runtime) {
  return GetCompileTimeConstInputs(node->def(), /*op_kernel=*/nullptr,
                                   &node->op_def(), const_input_idxs,
                                   flib_runtime);
}
}
// Backwards dataflow pass over `g` that determines which nodes — and which
// _Arg indices — must be compile-time constants for XLA compilation.
// `compile_time_const_arg_indices` is indexed by the _Arg "index" attr and
// `compile_time_const_nodes` by node id; either may be null.
// `edge_filter_input`, if set, restricts which edges propagate constness.
Status BackwardsConstAnalysis(
    const Graph& g, std::vector<bool>* compile_time_const_arg_indices,
    std::vector<bool>* compile_time_const_nodes,
    FunctionLibraryRuntime* flib_runtime,
    std::function<bool(const Edge&)> edge_filter_input) {
  // Fast path: reuse the per-graph cache, but only when the caller asks for
  // argument indices alone and applies no custom edge filter (the cache is
  // computed for the unfiltered analysis).
  if (!compile_time_const_nodes && g.GetConstArgIndicesCache().has_value() &&
      !edge_filter_input) {
    VLOG(5) << "Using cached argument indices on graph " << &g;
    *compile_time_const_arg_indices = g.GetConstArgIndicesCache().value();
    return absl::OkStatus();
  }
  auto edge_filter = [&](const Edge& e) {
    return edge_filter_input ? edge_filter_input(e) : true;
  };
  // If the caller did not supply a node vector, use scratch storage so the
  // visitor below can still mark nodes.
  std::vector<bool> compile_time_const_nodes_impl;
  if (compile_time_const_nodes) {
    CHECK_EQ(compile_time_const_nodes->size(), g.num_node_ids());
  } else {
    compile_time_const_nodes_impl.resize(g.num_node_ids());
    compile_time_const_nodes = &compile_time_const_nodes_impl;
  }
  Status status;
  auto visit = [&](Node* node) {
    if (!status.ok()) return;
    // Metadata ops (e.g. Shape) do not force their inputs to be constant.
    if (XlaOpRegistry::IsMetadataOp(node->type_string())) {
      VLOG(3) << "must-be-const node is metadata op: " << node->name();
      return;
    }
    // If this node was already marked must-be-const, propagate the
    // requirement backwards to the producers of all its data inputs.
    if ((*compile_time_const_nodes)[node->id()]) {
      VLOG(3) << "marking consts for must-be-const node " << node->name();
      if (node->type_string() == "_Arg") {
        int index;
        status = GetNodeAttr(node->attrs(), "index", &index);
        if (!status.ok()) return;
        if (compile_time_const_arg_indices) {
          (*compile_time_const_arg_indices)[index] = true;
        }
        VLOG(3) << "  const _Arg " << index << ": " << node->name();
        return;
      }
      for (const Edge* pred : node->in_edges()) {
        if (!pred->IsControlEdge() && edge_filter(*pred)) {
          // Skip over const-traversable ops (identity-like pass-throughs) to
          // reach the real producer; `pred` is rebound in place.
          while (edge_filter(*pred) && IsConstTraversableOpType(pred->src())) {
            status = pred->src()->input_edge(pred->src_output(), &pred);
            if (!status.ok()) return;
          }
          if (edge_filter(*pred)) {
            VLOG(4) << "  " << pred->src()->name() << " must be const (is "
                    << pred->src()->type_string() << ")";
            (*compile_time_const_nodes)[pred->src()->id()] = true;
          }
        }
      }
      return;
    }
    // Otherwise, mark the producers of this node's must-be-const inputs.
    std::vector<int> const_input_idxs;
    status = GetCompileTimeConstInputs(node, &const_input_idxs, flib_runtime);
    if (!status.ok() || const_input_idxs.empty()) {
      return;
    }
    VLOG(3) << "marking consts for must-be-const inputs of " << node->name();
    for (Edge const* edge : node->in_edges()) {
      // const_input_idxs is sorted ascending, so binary search is valid.
      if (!edge->IsControlEdge() &&
          absl::c_binary_search(const_input_idxs, edge->dst_input()) &&
          edge_filter(*edge)) {
        while (edge_filter(*edge) && IsConstTraversableOpType(edge->src())) {
          status = edge->src()->input_edge(edge->src_output(), &edge);
          if (!status.ok()) return;
        }
        if (edge_filter(*edge)) {
          VLOG(4) << "  input " << edge->dst_input() << ": "
                  << edge->src()->name() << " must be const (is "
                  << edge->src()->type_string() << ")";
          (*compile_time_const_nodes)[edge->src()->id()] = true;
        }
      }
    }
  };
  // DFS with NextIteration back-edges excluded keeps the traversal acyclic.
  DFS(g, {}, visit, NodeComparatorName{},
      [](const Edge& edge) { return !edge.src()->IsNextIteration(); });
  // Populate the cache for the unfiltered case so later calls hit the fast
  // path above.
  if (compile_time_const_arg_indices && !edge_filter_input) {
    VLOG(5) << "Setting the cache on the graph: " << &g;
    g.GetConstArgIndicesCache() = *compile_time_const_arg_indices;
  }
  return status;
}
// Overload for an instantiated kernel: dispatches on the kernel's NodeDef
// with no OpDef (the kernel itself is consulted instead).
Status GetCompileTimeConstInputs(const OpKernel* op_kernel,
                                 std::vector<int>* const_input_idxs,
                                 FunctionLibraryRuntime* flib_runtime) {
  return GetCompileTimeConstInputs(op_kernel->def(), op_kernel,
                                   /*op_def=*/nullptr, const_input_idxs,
                                   flib_runtime);
}
} | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
// Basic contract: the shape operand chain of Reshape (Arg1 via Add) and the
// reduction-indices operand of Sum (Arg3) must be compile-time constants;
// pure data operands (Arg0, Arg2) must not be marked.  Arg0 only feeds Shape,
// a metadata op, so it stays non-const.
TEST(ConstAnalysisTest, Basics) {
  Scope root = Scope::NewRootScope();
  auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
  auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
  auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
  auto arg3 = ops::_Arg(root.WithOpName("Arg3"), DT_INT32, 3);
  auto a = ops::Shape(root, arg0);
  auto b = ops::Add(root, a, arg1);
  auto c = ops::Reshape(root, arg2, b);
  auto d = ops::Mul(root, c, ops::Sum(root, arg3, arg3));
  FixupSourceAndSinkEdges(root.graph());
  std::vector<bool> const_args(4, false);
  std::vector<bool> const_nodes(root.graph()->num_node_ids(), false);
  TF_ASSERT_OK(BackwardsConstAnalysis(*root.graph(), &const_args, &const_nodes,
                                      nullptr));
  EXPECT_EQ(const_args, std::vector<bool>({false, true, false, true}));
  EXPECT_FALSE(const_nodes[arg0.node()->id()]);
  EXPECT_TRUE(const_nodes[arg1.node()->id()]);
  EXPECT_FALSE(const_nodes[arg2.node()->id()]);
  EXPECT_TRUE(const_nodes[arg3.node()->id()]);
}
// The analysis result must not depend on node visitation order; swapping the
// two Reshape intermediates exercises both orders.
TEST(ConstAnalysisTest, TopologicalOrder) {
  for (bool order : {false, true}) {
    Scope root = Scope::NewRootScope();
    auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
    auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
    auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
    auto a = ops::Reshape(root, arg0, arg1);
    auto b = ops::Reshape(root, arg2, a);
    if (order) {
      std::swap(a, b);
    }
    auto c = ops::Add(root, a, b);
    Graph graph(OpRegistry::Global());
    TF_ASSERT_OK(root.ToGraph(&graph));
    std::vector<bool> const_args(3, false);
    TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
                                        nullptr,
                                        nullptr));
    EXPECT_EQ(const_args, std::vector<bool>({true, true, false}));
  }
}
// Helper: builds a (Stateful)PartitionedCall to a Reshape-wrapping function
// and verifies the analysis recurses into the callee — the "shape" argument
// must be constant, the "tensor" argument must not.
void TestFunctionCall(bool is_stateful_partitioned_call) {
  FunctionDef callee = FunctionDefHelper::Define(
      "Callee", {"t:float", "shape:int32"}, {"result:float"}, {},
      {{{"result"}, "Reshape", {"t", "shape"}, {{"T", DT_FLOAT}}}});
  FunctionDefLibrary flib;
  *flib.add_function() = callee;
  FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
  Scope root = Scope::NewRootScope().ExitOnError();
  auto arg0 = ops::_Arg(root.WithOpName("tensor"), DT_FLOAT, 0);
  auto arg1 = ops::_Arg(root.WithOpName("shape"), DT_INT32, 1);
  NameAttrList call_attrs;
  call_attrs.set_name("Callee");
  if (is_stateful_partitioned_call) {
    ops::StatefulPartitionedCall b(root.WithOpName("Call"),
                                   {Output(arg0), Output(arg1)}, {DT_FLOAT},
                                   call_attrs);
  } else {
    ops::PartitionedCall b(root.WithOpName("Call"),
                           {Output(arg0), Output(arg1)}, {DT_FLOAT},
                           call_attrs);
  }
  Graph graph(&flib_def);
  TF_ASSERT_OK(root.ToGraph(&graph));
  OptimizerOptions opts;
  // A runtime is needed so the analysis can instantiate the callee.
  std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
      new ProcessFunctionLibraryRuntime(nullptr, Env::Default(),
                                        nullptr,
                                        TF_GRAPH_DEF_VERSION, &flib_def, opts));
  FunctionLibraryRuntime* lib_runtime =
      pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
  std::vector<bool> const_args(2, false);
  TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
                                      nullptr,
                                      lib_runtime));
  EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
// Const analysis must recurse into the function called by PartitionedCall.
TEST(ConstAnalysisTest, PartitionedCall) {
  TestFunctionCall(false);
}
// Same as PartitionedCall, through the stateful call op.
TEST(ConstAnalysisTest, StatefulPartitionedCall) {
  TestFunctionCall(true);
}
// Constness must not propagate backwards across control edges: arg0 only
// reaches the reshape's shape operand via a control dependency on a Const.
TEST(ConstAnalysisTest, DontFollowControlDependencies) {
  Scope root = Scope::NewRootScope();
  Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
  Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
  Output c1 =
      ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
  Output add = ops::Add(root, arg1, c1);
  Output reshape = ops::Reshape(root, arg1, add);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph));
  std::vector<bool> const_args(2, false);
  TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
                                      nullptr,
                                      nullptr));
  EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
// An explicit (empty) _XlaCompileTimeConstantInputs attr on Reshape overrides
// the registry, so no argument is required to be constant.
TEST(ConstAnalysisTest, RespectExplicitAttr_0) {
  Scope root = Scope::NewRootScope();
  Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
  Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
  Output c1 =
      ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
  Output add = ops::Add(root, arg1, c1);
  Output reshape = ops::Reshape(root, arg1, add);
  reshape.node()->AddAttr(kXlaCompileTimeConstantInputsAttr,
                          std::vector<string>());
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph));
  std::vector<bool> const_args(2, false);
  TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
                                      nullptr,
                                      nullptr));
  EXPECT_EQ(const_args, std::vector<bool>({false, false}));
}
// The attr can also force extra inputs to be constant: marking Add's "x"
// input makes arg0 a required constant even though Add normally needs none.
TEST(ConstAnalysisTest, RespectExplicitAttr_1) {
  Scope root = Scope::NewRootScope();
  Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
  Output c1 =
      ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
  Output add = ops::Add(root, arg0, c1);
  std::vector<string> add_constant_inputs;
  add_constant_inputs.push_back("x");
  add.node()->AddAttr(kXlaCompileTimeConstantInputsAttr, add_constant_inputs);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph));
  std::vector<bool> const_args(1, false);
  TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
                                      nullptr,
                                      nullptr));
  EXPECT_EQ(const_args, std::vector<bool>({true}));
}
// Enables XLA devices once at static-initialization time, before any test in
// this file runs.
static bool Initialized = [] {
  tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
  return true;
}();
}
} | Status GetFunctionBodies(FunctionLibraryRuntime* flib_runtime,
                         const NodeDef& node, StringPiece func_list_attr_name,
                         std::vector<const FunctionBody*>* fbodies) {
  // Instantiates every function named in the list-valued attr
  // `func_list_attr_name` of `node` (e.g. Case's "branches") and appends the
  // resulting FunctionBody pointers to `fbodies`.
  std::vector<NameAttrList> name_attr_lists;
  TF_RETURN_IF_ERROR(GetNodeAttr(node, func_list_attr_name, &name_attr_lists));
  for (const NameAttrList& name_attr_list : name_attr_lists) {
    FunctionLibraryRuntime::Handle func_handle;
    TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
        name_attr_list.name(), AttrSlice(&name_attr_list.attr()),
        &func_handle));
    fbodies->push_back(flib_runtime->GetFunctionBody(func_handle));
  }
  return absl::OkStatus();
} | TEST(ConstAnalysisTest, PartitionedCall) {
TestFunctionCall(false);
}
// Exercises the const analysis through the stateful call op.
TEST(ConstAnalysisTest, StatefulPartitionedCall) {
  TestFunctionCall(true);
}
#include "xla/service/gpu/fusion_merger.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
// Merges loop-fusion instructions into all of their users within a single
// computation when doing so is estimated to be profitable.  Per-reason
// failure counters are kept for VLOG diagnostics.
class FusionInstructionMerger {
 public:
  explicit FusionInstructionMerger(
      HloComputation* computation, const se::DeviceDescription& gpu_device_info,
      HloCostAnalysis::ShapeSizeFunction shape_size_function)
      : computation_(computation),
        shape_size_function_(shape_size_function),
        gpu_device_info_(gpu_device_info),
        dump_fusion_visualization_(computation->parent()
                                       ->config()
                                       .debug_options()
                                       .xla_dump_fusion_visualization()) {}
  // Visits fusions in post-order and merges those that ShouldFuse() approves.
  absl::Status Run();
  // True iff Run() modified the computation.
  bool changed() const { return changed_; }
 private:
  // Heuristic gate; returns an explained decision for logging.
  FusionDecision ShouldFuse(HloInstruction* producer);
  absl::Status FuseIntoAllUsers(HloInstruction* producer);
  HloComputation* computation_;
  HloCostAnalysis::ShapeSizeFunction shape_size_function_;
  // Constructed lazily on the first ShouldFuse() call that needs it.
  std::optional<GpuHloCostAnalysis> cost_analysis_;
  FusionInfoCache fusion_info_cache_;
  const se::DeviceDescription& gpu_device_info_;
  bool changed_ = false;
  bool dump_fusion_visualization_ = false;
  // Statistics reported at the end of Run().
  int total_visited_ = 0;
  int total_merged_ = 0;
  int num_fail_no_users_ = 0;
  int num_fail_not_loop_fusion_ = 0;
  int num_fail_merge_all_users_ = 0;
  int num_fail_inefficient_fusion_emitter_ = 0;
  int num_fail_fusion_too_large_ = 0;
  int num_fail_uncoalesced_read_ = 0;
  int num_fail_slower_if_fused_ = 0;
  FusionInstructionMerger(const FusionInstructionMerger&) = delete;
  FusionInstructionMerger& operator=(const FusionInstructionMerger&) = delete;
};
// Fuses `producer` into every one of its users — wrapping non-fusion users in
// a fresh fusion instruction first — then removes the producer.  Keeps the
// incremental cost analysis and the fusion-info cache consistent throughout.
absl::Status FusionInstructionMerger::FuseIntoAllUsers(
    HloInstruction* producer) {
  // Snapshot the user list: merging mutates producer->users().
  std::vector<HloInstruction*> users = producer->users();
  for (HloInstruction* user : users) {
    if (dump_fusion_visualization_) {
      RegisterFusionState(
          *computation_,
          absl::StrCat("About to fuse |", producer->name(), "| into |",
                       user->name(), "| inside FusionMerger"),
          *user,
          producer);
    }
    // Drop the user's old cost entry before it is rewritten; it is revisited
    // below after the merge.
    TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user));
    HloInstruction* consumer = user;
    if (consumer->opcode() != HloOpcode::kFusion) {
      // Wrap a non-fusion user in a single-instruction fusion so the merge
      // machinery applies uniformly.
      consumer = computation_->AddInstruction(HloInstruction::CreateFusion(
          user->shape(), ChooseFusionKind(*producer, *user), user));
      TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer));
    }
    consumer->MergeFusionInstruction(producer);
    TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer));
    fusion_info_cache_.Invalidate(consumer);
    if (dump_fusion_visualization_) {
      RegisterFusionState(*computation_,
                          absl::StrCat("Fused |", producer->name(), "| into |",
                                       user->name(), "| inside FusionMerger"),
                          *consumer);
    }
    changed_ = true;
  }
  // After merging into every user the producer must be dead.
  CHECK_EQ(0, producer->user_count()) << producer->ToString();
  TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer));
  TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer));
  fusion_info_cache_.Invalidate(producer);
  VLOG(2) << "Merged fusion instruction: " << producer->name()
          << " into users { "
          << absl::StrJoin(users, ", ",
                           [](std::string* out, HloInstruction* user) {
                             absl::StrAppend(out, user->name());
                           })
          << " }";
  return absl::OkStatus();
}
// Walks the computation in post-order, merging each fusion instruction into
// its users when ShouldFuse() approves, and logs aggregate statistics.
absl::Status FusionInstructionMerger::Run() {
  for (HloInstruction* producer : computation_->MakeInstructionPostOrder()) {
    if (producer->opcode() != HloOpcode::kFusion) {
      continue;
    }
    FusionDecision should_fuse = ShouldFuse(producer);
    if (should_fuse) {
      TF_RETURN_IF_ERROR(FuseIntoAllUsers(producer));
      ++total_merged_;
    } else {
      VLOG(3) << "Not fusing fusion |" << producer->name()
              << "| with all of it's users due to: " << should_fuse.Explain();
      if (dump_fusion_visualization_ && !producer->users().empty()) {
        RegisterFusionState(
            *computation_,
            absl::StrCat(
                "Not fusing fusion |", producer->name(),
                "| into all of its users due to: ", should_fuse.Explain()),
            *producer->users()[0],
            producer);
      }
    }
  }
  // Summary of what happened and why merges were rejected.
  VLOG(1) << "FusionInstructionMerger EXIT"
          << " computation: " << computation_->name()
          << " total_visited: " << total_visited_
          << " total_merged: " << total_merged_ << " merge failures { "
          << " no_users: " << num_fail_no_users_
          << " not_loop_fusion: " << num_fail_not_loop_fusion_
          << " merge_all_users: " << num_fail_merge_all_users_
          << " uncoalesced_read: " << num_fail_uncoalesced_read_
          << " inefficient_fusion_emitter: "
          << num_fail_inefficient_fusion_emitter_
          << " slower_if_fused: " << num_fail_slower_if_fused_
          << " fusion_too_large: " << num_fail_fusion_too_large_ << " }";
  return absl::OkStatus();
}
// Returns true once the cumulative fraction of the fusion's output elements
// produced by physically-transposing instructions reaches one half.
bool TransposesMostData(const HloInstruction& fusion) {
  float transpose_ratio = 0;
  for (const HloInstruction* fused : fusion.fused_instructions()) {
    if (!IsPhysicallyTransposing(*fused)) {
      continue;
    }
    transpose_ratio += 1.0 * ShapeUtil::ElementsInRecursive(fused->shape()) /
                       ShapeUtil::ElementsInRecursive(fusion.shape());
    if (transpose_ratio >= 0.5) {
      VLOG(3) << fusion.ToString() << " transpose ratio exceeds "
              << transpose_ratio;
      return true;
    }
  }
  return false;
}
// Decides whether `producer` should be merged into all of its users.  Cheap
// structural checks run first; the cost analysis and performance model are
// only consulted once those pass.
FusionDecision FusionInstructionMerger::ShouldFuse(HloInstruction* producer) {
  ++total_visited_;
  VLOG(4) << "Considering producer " << producer->name();
  if (producer->users().empty()) {
    ++num_fail_no_users_;
    return "fusion has no users";
  }
  if (!producer->IsLoopFusion()) {
    ++num_fail_not_loop_fusion_;
    return "not a loop fusion";
  }
  auto producer_hero = GetRealHeroForMultiOutputFusion(*producer);
  bool has_reduction_user = false;
  // Every user must be mergeable; a single bad user rejects the producer.
  for (const HloInstruction* user : producer->users()) {
    if (user->opcode() == HloOpcode::kBitcast) {
      ++num_fail_merge_all_users_;
      return "not fusing bitcast ops";
    }
    if (user->IsCustomFusion()) {
      ++num_fail_merge_all_users_;
      return "not fusing custom fusions";
    }
    auto consumer_hero = GetRealHeroForMultiOutputFusion(*user);
    if (auto compatible =
            FusionHeroesAreCompatible(producer_hero, consumer_hero);
        !compatible) {
      return compatible;
    }
    FusionDecision fusible = IsProducerConsumerFusible(*producer, *user);
    if (!fusible) {
      ++num_fail_merge_all_users_;
      VLOG(9) << user->ToString();
      return fusible;
    }
    if (IsInputFusibleReduction(*user)) {
      has_reduction_user = true;
    }
  }
  // Transposed reads feeding a reduction would be mostly uncoalesced.
  if (has_reduction_user && TransposesMostData(*producer)) {
    ++num_fail_uncoalesced_read_;
    return "would read mostly uncoalesced";
  }
  for (const HloInstruction* user : producer->users()) {
    FusionDecision fits = FusionFitsInBudget(
        *user, *producer, gpu_device_info_,
        /*is_consumer_producer_fusion=*/true, &fusion_info_cache_);
    if (!fits) {
      ++num_fail_fusion_too_large_;
      return fits;
    }
  }
  // Lazily build the cost analysis the first time a candidate survives the
  // structural checks; it is then kept up to date incrementally.
  if (!cost_analysis_) {
    VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
    cost_analysis_.emplace(
        GpuHloCostAnalysis::Options{shape_size_function_,
                                    {},
                                    /*count_multiple_input_accesses=*/true},
        &gpu_device_info_);
    TF_CHECK_OK(computation_->Accept(&cost_analysis_.value()));
  }
  for (const HloInstruction* user : producer->users()) {
    if (cost_analysis_->ProducerConsumerMergedTooLarge(*producer, *user)) {
      ++num_fail_inefficient_fusion_emitter_;
      return FusionDecision{} << "if merged with " << user->name()
                              << " will generate huge IR";
    }
  }
  // Final arbiter: estimated runtime with vs. without the merge.
  GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
      producer, &*cost_analysis_, GpuPerformanceModelOptions::Default(),
      producer->users());
  if (t.time_fused > t.time_unfused) {
    ++num_fail_slower_if_fused_;
    return "will execute slower if fused";
  }
  return {};
}
// Runs the merger over every non-fusion computation of `module` restricted to
// `execution_threads`.  Returns true iff any computation changed.
absl::StatusOr<bool> FusionMerger::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  VLOG(1) << "FusionMerger for module: " << module->name();
  for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
    VLOG(9) << "Before running FusionInstructionMerger for computation: "
            << comp->name();
    XLA_VLOG_LINES(9, comp->ToString());
    FusionInstructionMerger merger(comp, gpu_device_info_,
                                   shape_size_function_);
    TF_RETURN_IF_ERROR(merger.Run());
    if (merger.changed()) {
      changed = true;
    }
    VLOG(9) << "After running FusionInstructionMerger for computation: "
            << comp->name() << " changed: " << changed;
    XLA_VLOG_LINES(9, comp->ToString());
  }
  return changed;
}
}
} | #include "xla/service/gpu/fusion_merger.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
// Fixture providing a FusionMerger configured for an RTX A6000 device with a
// pointer-size-aware shape-size function (8-byte pointers).
class FusionMergerTest : public HloTestBase {
  HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
    return [&](const Shape& shape) {
      constexpr int64_t kPointerSize = 8;
      return ShapeUtil::ByteSizeOf(shape, kPointerSize);
    };
  }
 public:
  FusionMerger fusion_merger_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
                              ShapeSizeBytesFunction()};
  FusionMergerTest() : HloTestBase() {}
};
// fusion.4 is shared by fusion.5 and fusion.6; merging it into both grows
// each consumer's instruction count from 4 to 7 while fusion.3 is untouched.
TEST_F(FusionMergerTest, MergeSharedFusionInstruction) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
comp.3 {
  constant.param_0 = f32[4]{0} parameter(0)
  param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1)
  get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0
  ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6)
}
comp.2 {
  param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
  get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1
  get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2
  ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5)
}
comp.1 {
  add.1.param_1.1 = f32[4]{0} parameter(1)
  constant.param_1.3 = f32[4]{0} parameter(0)
  add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
  ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
  add.1.param_1 = f32[4]{0} parameter(1)
  constant.param_1.1 = f32[4]{0} parameter(0)
  multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
  ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY MergeSharedFusionInstruction.Computation0 {
  constant = f32[4]{0} constant({1, 1, 1, 1})
  param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
  fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3
  fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2
  fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1
  fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp
  ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6)
})")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
  auto* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(HloOpcode::kTuple, root->opcode());
  auto* operand0 = root->operand(0);
  EXPECT_EQ(HloOpcode::kFusion, operand0->opcode());
  EXPECT_EQ(4, operand0->fused_instruction_count());
  auto* operand1 = root->operand(1);
  EXPECT_EQ(HloOpcode::kFusion, operand1->opcode());
  EXPECT_EQ(7, operand1->fused_instruction_count());
  auto* operand2 = root->operand(2);
  EXPECT_EQ(HloOpcode::kFusion, operand2->opcode());
  EXPECT_EQ(7, operand2->fused_instruction_count());
}
// Merging comp0 into both reducing consumers would re-read its eight huge
// operands twice, so the performance model must reject the merge.
TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
  x = f32[] parameter(0)
  y = f32[] parameter(1)
  ROOT _ = f32[] add(x, y)
}
comp0 {
  p = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
  gte0 = f32[100000000] get-tuple-element(p), index=0
  gte1 = f32[100000000] get-tuple-element(p), index=1
  add.9 = f32[100000000] add(gte0, gte1)
  gte2 = f32[100000000] get-tuple-element(p), index=2
  add.10 = f32[100000000] add(add.9, gte2)
  gte3 = f32[100000000] get-tuple-element(p), index=3
  add.11 = f32[100000000] add(add.10, gte3)
  p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
  gte4 = f32[100000000] get-tuple-element(p1), index=0
  gte5 = f32[100000000] get-tuple-element(p1), index=1
  add.12 = f32[100000000] add(gte4, gte5)
  gte6 = f32[100000000] get-tuple-element(p1), index=2
  add.13 = f32[100000000] add(add.12, gte6)
  gte7 = f32[100000000] get-tuple-element(p1), index=3
  add.14 = f32[100000000] add(add.13, gte7)
  ROOT r = f32[100000000] add(add.14, add.11)
}
comp1 {
  p = f32[100000000] parameter(0)
  c0 = f32[] constant(0)
  ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
}
comp2 {
  p = f32[100000000] parameter(0)
  c0 = f32[] constant(0)
  r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
  ROOT n = f32[] negate(r)
}
ENTRY m.Computation2 {
  p0 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
  p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
  fusion.0 = f32[100000000] fusion(p0, p1), kind=kLoop, calls=comp0
  fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1
  fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2
  ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2)
}
)")
                    .value();
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
// Here merging the shared producer into its two small consumers reduces
// total memory traffic, so the merge should happen.
TEST_F(FusionMergerTest, LessMemoryAccessIfFused) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
comp.2 {
  state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
  get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0
  get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1
  add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6)
  get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2
  ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7)
}
comp.1 {
  add.1.param_1.1 = f32[4]{0} parameter(1)
  constant.param_1.3 = f32[4]{0} parameter(0)
  add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
  ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
  add.1.param_1 = f32[4]{0} parameter(1)
  constant.param_1.1 = f32[4]{0} parameter(0)
  multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
  ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY m.Computation2 {
  constant = f32[4]{0} constant({1, 1, 1, 1})
  state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
  fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2
  fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1
  fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp
  ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4)
})")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
// A loop fusion feeding an input (reduce) fusion gets merged, leaving a
// single fusion over the parameter as the entry root.
TEST_F(FusionMergerTest, WillMergeIntoInputFusion) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule m
    f1_computation {
      f1_p0 = f32[32]{0} parameter(0)
      ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0)
    }
    add_computation {
      add_lhs = f32[] parameter(0)
      add_rhs = f32[] parameter(1)
      ROOT add_root = f32[] add(add_lhs, add_rhs)
    }
    f2_computation {
      f2_p0 = f32[32]{0} parameter(0)
      f2_mul = f32[32]{0} multiply(f2_p0, f2_p0)
      f2_zero = f32[] constant(0)
      ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0},
             to_apply=add_computation
    }
    ENTRY entry {
      p0 = f32[32]{0} parameter(0)
      f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation
      ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
    })")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Fusion(m::Parameter())));
}
// A producer fusion with both a fused and an unfused (plain reduce) consumer
// is merged into both; the unfused consumer is wrapped in a fusion first.
TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule jit_matmul.36
    max (parameter.13: f32[], parameter.14: f32[]) -> f32[] {
      parameter.13 = f32[] parameter(0)
      parameter.14 = f32[] parameter(1)
      ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14)
    }
    add (parameter.29: f32[], parameter.30: f32[]) -> f32[] {
      parameter.29 = f32[] parameter(0)
      parameter.30 = f32[] parameter(1)
      ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30)
    }
    fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] {
      param_1.4 = f32[200,200,200]{2,1,0} parameter(0)
      param_2.1 = f32[200,200]{1,0} parameter(1)
      broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2}
      subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3)
      exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0)
      constant.27 = f32[] constant(0)
      ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add
    }
    fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] {
      param_1.9 = f32[200,200]{1,0} parameter(1)
      broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1}
      param_0.7 = f32[200,200]{1,0} parameter(0)
      broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2}
      ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8)
    }
    ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] {
      parameter.2 = f32[200,200]{1,0} parameter(1)
      parameter.1 = f32[200,200]{1,0} parameter(0)
      fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3
      constant.11 = f32[] constant(-inf)
      reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max
      ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1
    })")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter())));
}
// The producer's copy transposes the whole operand; merging it into a
// reduction consumer would make reads mostly uncoalesced, so no merge.
TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule m
    f1_computation {
      f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
      add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0)
      ROOT f1_root = f32[16,16,256]{2,1,0} copy(add)
    }
    add_computation {
      add_lhs = f32[] parameter(0)
      add_rhs = f32[] parameter(1)
      ROOT add_root = f32[] add(add_lhs, add_rhs)
    }
    f2_computation {
      f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
      f2_zero = f32[] constant(0)
      ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
             to_apply=add_computation
    }
    ENTRY entry {
      p0 = f32[16,16,256]{0,1,2} parameter(0)
      f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
      ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
    })")
                    .value();
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
// Only a minority slice (5 of 16 rows) is physically transposed here, below
// the 0.5 ratio threshold in TransposesMostData, so the merge is allowed.
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule m
    f1_computation {
      f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
      slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]}
      f1_copy = f32[5,16,256]{2,1,0} copy(slice1)
      slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]}
      bitcast = f32[11,16,256]{2,1,0} bitcast(slice2)
      ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0}
    }
    add_computation {
      add_lhs = f32[] parameter(0)
      add_rhs = f32[] parameter(1)
      ROOT add_root = f32[] add(add_lhs, add_rhs)
    }
    f2_computation {
      f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
      f2_zero = f32[] constant(0)
      ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
             to_apply=add_computation
    }
    ENTRY entry {
      p0 = f32[16,16,256]{0,1,2} parameter(0)
      f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
      ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
    })")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
// Merging would push the consumer past MaxOperandsAndOutputsPerFusion()
// operands, so the merger must leave the module unchanged.
TEST_F(FusionMergerTest, AvoidsLargeFusion) {
  constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder b(TestName());
  Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
  std::vector<HloInstruction*> entry_params;
  for (int64_t i = 0; i < kNumParams; ++i) {
    entry_params.push_back(
        b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
  }
  // Builds a loop fusion summing all of `params`.
  auto make_fusion = [&](absl::Span<HloInstruction* const> params) {
    HloComputation::Builder sub_builder("subcomp");
    HloInstruction* sum = nullptr;
    for (int64_t i = 0; i < params.size(); ++i) {
      auto p = sub_builder.AddInstruction(
          HloInstruction::CreateParameter(i, shape, "p"));
      if (sum == nullptr) {
        sum = p;
      } else {
        sum = sub_builder.AddInstruction(
            HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, p));
      }
    }
    HloComputation* subcomp =
        module->AddEmbeddedComputation(sub_builder.Build());
    return HloInstruction::CreateFusion(
        shape, HloInstruction::FusionKind::kLoop, params, subcomp);
  };
  // The first fusion already has the maximum operand count; merging it into
  // the second would exceed the limit.
  auto fusion = b.AddInstruction(
      make_fusion(absl::MakeSpan(entry_params)
                      .subspan(0, MaxOperandsAndOutputsPerFusion())));
  b.AddInstruction(make_fusion({entry_params.back(), fusion}));
  module->AddEntryComputation(b.Build());
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) {
  // Two chained slice/add reduction ladders; merging them is expected to be
  // rejected.
  constexpr char kHloText[] = R"(
HloModule m
f1 {
Arg_0.5 = f32[200000] parameter(0)
slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]}
slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]}
add.9 = f32[100000] add(slice.7, slice.8)
slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]}
slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]}
add.12 = f32[50000] add(slice.10, slice.11)
slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]}
slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]}
add.15 = f32[25000] add(slice.13, slice.14)
slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]}
slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]}
add.18 = f32[12500] add(slice.16, slice.17)
slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]}
slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]}
add.21 = f32[6250] add(slice.19, slice.20)
slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]}
slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]}
ROOT add.24 = f32[3125] add(slice.22, slice.23)
}
f2 {
Arg_0 = f32[3125] parameter(0)
slice.25 = f32[1562] slice(Arg_0), slice={[0:3124:2]}
slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]}
add.27 = f32[1562] add(slice.25, slice.26)
slice.28 = f32[781] slice(add.27), slice={[0:1561:2]}
slice.29 = f32[781] slice(add.27), slice={[1:1562:2]}
add.30 = f32[781] add(slice.28, slice.29)
slice.31 = f32[390] slice(add.30), slice={[0:780:2]}
slice.32 = f32[390] slice(add.30), slice={[1:781:2]}
add.33 = f32[390] add(slice.31, slice.32)
slice.34 = f32[195] slice(add.33), slice={[0:389:2]}
slice.35 = f32[195] slice(add.33), slice={[1:390:2]}
add.36 = f32[195] add(slice.34, slice.35)
slice.37 = f32[97] slice(add.36), slice={[0:194:2]}
slice.38 = f32[97] slice(add.36), slice={[1:195:2]}
add.39 = f32[97] add(slice.37, slice.38)
slice.40 = f32[48] slice(add.39), slice={[0:96:2]}
slice.41 = f32[48] slice(add.39), slice={[1:97:2]}
ROOT add.42 = f32[48] add(slice.40, slice.41)
}
ENTRY e {
p0 = f32[200000] parameter(0)
f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1
ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) {
  // A tiny slice producer feeding a broadcast consumer; the merge is
  // expected to happen.
  constexpr char kHloText[] = R"(
HloModule m
f1 {
p01 = s8[1000000] parameter(0)
ROOT s0 = s8[10] slice(p01), slice={[0:10]}
}
f2 {
p02 = s8[10] parameter(0)
ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0}
}
ENTRY e {
p0 = s8[1000000] parameter(0)
f1 = s8[10] fusion(p0), kind=kLoop, calls=f1
ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) {
  // f1 (broadcast+tanh) has two consumers, but merging it into both is
  // expected to be accepted.
  constexpr char kHloText[] = R"(
HloModule m
%f_a (p: f32[]) -> f32[1024,1024,1024] {
%p = f32[] parameter(0)
%b = f32[1024,1024,1024] broadcast(%p), dimensions={}
ROOT %t = f32[1024,1024,1024] tanh(%b)
}
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
ENTRY entry {
p0 = f32[] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a
f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b
f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
ROOT f4 = f32[1024,1024,1024] add(f2, f3)
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) {
  // An expensive (tanh) producer with exactly one consumer; the merge is
  // expected to be accepted.
  constexpr char kHloText[] = R"(
HloModule m
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] add(%p, %p)
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) {
  // A long tanh chain feeding a broadcast consumer that reuses its input;
  // merging is expected to be rejected.
  constexpr char kHloText[] = R"(
HloModule m
%f_b {
%p = f32[1024,1024,1024] parameter(0)
%t1 = f32[1024,1024,1024] tanh(%p)
%t2 = f32[1024,1024,1024] tanh(%t1)
%t3 = f32[1024,1024,1024] tanh(%t2)
%t4 = f32[1024,1024,1024] tanh(%t3)
%t5 = f32[1024,1024,1024] tanh(%t4)
%t6 = f32[1024,1024,1024] tanh(%t5)
%t7 = f32[1024,1024,1024] tanh(%t6)
%t8 = f32[1024,1024,1024] tanh(%t7)
ROOT %t9 = f32[1024,1024,1024] tanh(%t8)
}
%f_c {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2}
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, NoMergeWithBitcast) {
  // fusion.105 reaches fusion.104 only through a bitcast; the merger is
  // expected to leave the module unchanged.
  constexpr char kHloText[] = R"(
HloModule m
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.103 {
param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.420 = f32[8,512]{1,0} parameter(1)
bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420)
convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144)
bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252)
broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2}
divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481)
ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15)
}
fused_computation.105 {
param_1.426 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426)
transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2}
param_0.315 = f16[8,512]{1,0} parameter(0)
broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2}
subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482)
ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22)
}
fused_computation.104 {
param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0)
convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000)
constant_752 = f32[] constant(-0)
ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752),
dimensions={1}, to_apply=f32add
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105
bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105)
fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104
ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103
}
)";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, CostBasedMerge) {
  // fusion.45 feeds both fusion.44 (reduce) and fusion.43 directly, with no
  // intervening bitcast; the merge is expected to be accepted.
  constexpr char kHloText[] = R"(
HloModule m
fused_computation.45 {
param_1.194 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194)
param_0.135 = f16[8,512]{1,0} parameter(0)
broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2}
subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391)
ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6)
}
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.44 {
param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869)
transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2}
bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212)
constant_429 = f32[] constant(0)
ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add
}
fused_computation.43 {
param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.188 = f32[8,512]{1,0} parameter(1)
bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188)
convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040)
bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220)
broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2}
divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390)
ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11)
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45
fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44
ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43
}
)";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
add_float_.56 {
x.57 = f32[] parameter(0)
y.58 = f32[] parameter(1)
ROOT add.59 = f32[] add(x.57, y.58)
}
fused_computation.66 {
constant.635 = f32[] constant(0)
broadcast.257 = f32[459,3]{1,0} broadcast(constant.635), dimensions={}
constant.641 = f32[] constant(1)
broadcast.256 = f32[459,3]{1,0} broadcast(constant.641), dimensions={}
broadcast.255 = f32[459]{0} broadcast(constant.635), dimensions={}
iota.28 = f32[459]{0} iota(), iota_dimension=0
constant.629 = f32[] constant(1.49891067)
broadcast.253 = f32[459]{0} broadcast(constant.629), dimensions={}
multiply.39 = f32[459]{0} multiply(iota.28, broadcast.253)
constant.633 = f32[] constant(-1)
broadcast.252 = f32[459]{0} broadcast(constant.633), dimensions={}
add.31 = f32[459]{0} add(multiply.39, broadcast.252)
ceil.11 = f32[459]{0} ceil(add.31)
constant.630 = f32[] constant(685)
broadcast.251 = f32[459]{0} broadcast(constant.630), dimensions={}
clamp.49 = f32[459]{0} clamp(broadcast.255, ceil.11, broadcast.251)
subtract.11 = f32[459]{0} subtract(clamp.49, multiply.39)
broadcast.249 = f32[459,3]{1,0} broadcast(subtract.11), dimensions={0}
iota.26 = f32[459,3]{1,0} iota(), iota_dimension=1
add.30 = f32[459,3]{1,0} add(broadcast.249, iota.26)
abs.3 = f32[459,3]{1,0} abs(add.30)
subtract.10 = f32[459,3]{1,0} subtract(broadcast.256, abs.3)
maximum.6 = f32[459,3]{1,0} maximum(broadcast.257, subtract.10)
ROOT reduce.3 = f32[459]{0} reduce(maximum.6, constant.635), dimensions={1}, to_apply=add_float_.56
}
fused_computation.67 {
constant.684 = f32[] constant(0)
broadcast.296 = f32[1130,3]{1,0} broadcast(constant.684), dimensions={}
constant.685 = f32[] constant(1)
broadcast.295 = f32[1130,3]{1,0} broadcast(constant.685), dimensions={}
broadcast.294 = f32[1130]{0} broadcast(constant.684), dimensions={}
iota.41 = f32[1130]{0} iota(), iota_dimension=0
constant.675 = f32[] constant(1.34513271)
broadcast.293 = f32[1130]{0} broadcast(constant.675), dimensions={}
multiply.47 = f32[1130]{0} multiply(iota.41, broadcast.293)
constant.677 = f32[] constant(-1)
broadcast.290 = f32[1130]{0} broadcast(constant.677), dimensions={}
add.39 = f32[1130]{0} add(multiply.47, broadcast.290)
ceil.15 = f32[1130]{0} ceil(add.39)
constant.676 = f32[] constant(1517)
broadcast.289 = f32[1130]{0} broadcast(constant.676), dimensions={}
clamp.53 = f32[1130]{0} clamp(broadcast.294, ceil.15, broadcast.289)
subtract.19 = f32[1130]{0} subtract(clamp.53, multiply.47)
broadcast.287 = f32[1130,3]{1,0} broadcast(subtract.19), dimensions={0}
iota.39 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.38 = f32[1130,3]{1,0} add(broadcast.287, iota.39)
abs.7 = f32[1130,3]{1,0} abs(add.38)
subtract.18 = f32[1130,3]{1,0} subtract(broadcast.295, abs.7)
maximum.10 = f32[1130,3]{1,0} maximum(broadcast.296, subtract.18)
ROOT reduce.4 = f32[1130]{0} reduce(maximum.10, constant.684), dimensions={1}, to_apply=add_float_.56
}
fused_computation.59 {
constant.532 = f32[] constant(0)
broadcast.316 = f32[1130,3]{1,0} broadcast(constant.532), dimensions={}
constant.663 = f32[] constant(1)
broadcast.315 = f32[1130,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.314 = f32[1130]{0} broadcast(constant.532), dimensions={}
iota.47 = f32[1130]{0} iota(), iota_dimension=0
constant.579 = f32[] constant(1.34513271)
broadcast.311 = f32[1130]{0} broadcast(constant.579), dimensions={}
multiply.51 = f32[1130]{0} multiply(iota.47, broadcast.311)
constant.578 = f32[] constant(-1)
broadcast.310 = f32[1130]{0} broadcast(constant.578), dimensions={}
add.43 = f32[1130]{0} add(multiply.51, broadcast.310)
ceil.17 = f32[1130]{0} ceil(add.43)
constant.576 = f32[] constant(1517)
broadcast.309 = f32[1130]{0} broadcast(constant.576), dimensions={}
clamp.55 = f32[1130]{0} clamp(broadcast.314, ceil.17, broadcast.309)
subtract.24 = f32[1130]{0} subtract(clamp.55, multiply.51)
broadcast.306 = f32[1130,3]{1,0} broadcast(subtract.24), dimensions={0}
iota.45 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.42 = f32[1130,3]{1,0} add(broadcast.306, iota.45)
abs.9 = f32[1130,3]{1,0} abs(add.42)
subtract.23 = f32[1130,3]{1,0} subtract(broadcast.315, abs.9)
maximum.12 = f32[1130,3]{1,0} maximum(broadcast.316, subtract.23)
param_2.183 = f32[1130]{0} parameter(2)
broadcast.172 = f32[1130,3]{1,0} broadcast(param_2.183), dimensions={0}
divide.3 = f32[1130,3]{1,0} divide(maximum.12, broadcast.172)
bitcast.53 = f32[3390]{0} bitcast(divide.3)
broadcast.171 = f32[3390,1377]{1,0} broadcast(bitcast.53), dimensions={0}
broadcast.276 = f32[459,3]{1,0} broadcast(constant.532), dimensions={}
broadcast.275 = f32[459,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.274 = f32[459]{0} broadcast(constant.532), dimensions={}
iota.35 = f32[459]{0} iota(), iota_dimension=0
constant.614 = f32[] constant(1.49891067)
broadcast.273 = f32[459]{0} broadcast(constant.614), dimensions={}
multiply.43 = f32[459]{0} multiply(iota.35, broadcast.273)
broadcast.272 = f32[459]{0} broadcast(constant.578), dimensions={}
add.35 = f32[459]{0} add(multiply.43, broadcast.272)
ceil.13 = f32[459]{0} ceil(add.35)
constant.611 = f32[] constant(685)
broadcast.269 = f32[459]{0} broadcast(constant.611), dimensions={}
clamp.51 = f32[459]{0} clamp(broadcast.274, ceil.13, broadcast.269)
subtract.15 = f32[459]{0} subtract(clamp.51, multiply.43)
broadcast.267 = f32[459,3]{1,0} broadcast(subtract.15), dimensions={0}
iota.33 = f32[459,3]{1,0} iota(), iota_dimension=1
add.34 = f32[459,3]{1,0} add(broadcast.267, iota.33)
abs.5 = f32[459,3]{1,0} abs(add.34)
subtract.14 = f32[459,3]{1,0} subtract(broadcast.275, abs.5)
maximum.8 = f32[459,3]{1,0} maximum(broadcast.276, subtract.14)
param_1.177 = f32[459]{0} parameter(1)
broadcast.170 = f32[459,3]{1,0} broadcast(param_1.177), dimensions={0}
divide.2 = f32[459,3]{1,0} divide(maximum.8, broadcast.170)
bitcast.52 = f32[1377]{0} bitcast(divide.2)
broadcast.169 = f32[3390,1377]{1,0} broadcast(bitcast.52), dimensions={1}
multiply.15 = f32[3390,1377]{1,0} multiply(broadcast.171, broadcast.169)
bitcast.61 = f32[1130,3,459,3]{3,2,1,0} bitcast(multiply.15)
transpose.68 = f32[459,1130,3,3]{2,0,3,1} transpose(bitcast.61), dimensions={2,0,3,1}
// NOTE(review): the "copy.1 = f | " prefix on the next line is extraction
// residue fused onto this function's signature (and the preceding test's
// raw string is truncated) — restore both from the upstream source.
copy.1 = f | absl::Status FusionInstructionMerger::FuseIntoAllUsers(
HloInstruction* producer) {
// Fuses `producer` into every one of its users and then removes it from the
// computation, keeping the cost analysis and fusion-info cache in sync.
// Take a snapshot of the user list: fusing mutates producer->users().
std::vector<HloInstruction*> users = producer->users();
for (HloInstruction* user : users) {
if (dump_fusion_visualization_) {
// Record the pre-merge state for the fusion visualizer.
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*user,
producer);
}
// Drop the stale cost-analysis entry; the consumer is revisited below.
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user));
HloInstruction* consumer = user;
if (consumer->opcode() != HloOpcode::kFusion) {
// Wrap a non-fusion user in a fresh fusion of the same shape so that
// MergeFusionInstruction can be applied uniformly.
consumer = computation_->AddInstruction(HloInstruction::CreateFusion(
user->shape(), ChooseFusionKind(*producer, *user), user));
TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer));
}
consumer->MergeFusionInstruction(producer);
TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer));
fusion_info_cache_.Invalidate(consumer);
if (dump_fusion_visualization_) {
// Record the post-merge state.
RegisterFusionState(*computation_,
absl::StrCat("Fused |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*consumer);
}
changed_ = true;
}
// Every user absorbed a copy of the producer, so it must now be dead.
CHECK_EQ(0, producer->user_count()) << producer->ToString();
TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer));
fusion_info_cache_.Invalidate(producer);
VLOG(2) << "Merged fusion instruction: " << producer->name()
<< " into users { "
<< absl::StrJoin(users, ", ",
[](std::string* out, HloInstruction* user) {
absl::StrAppend(out, user->name());
})
<< " }";
return absl::OkStatus();
} | TEST_F(FusionMergerTest, MergeSharedFusionInstruction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
comp.3 {
constant.param_0 = f32[4]{0} parameter(0)
param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1)
get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0
ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6)
}
comp.2 {
param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1
get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2
ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY MergeSharedFusionInstruction.Computation0 {
constant = f32[4]{0} constant({1, 1, 1, 1})
param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3
fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2
fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1
fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, root->opcode());
auto* operand0 = root->operand(0);
EXPECT_EQ(HloOpcode::kFusion, operand0->opcode());
EXPECT_EQ(4, operand0->fused_instruction_count());
auto* operand1 = root->operand(1);
EXPECT_EQ(HloOpcode::kFusion, operand1->opcode());
EXPECT_EQ(7, operand1->fused_instruction_count());
auto* operand2 = root->operand(2);
EXPECT_EQ(HloOpcode::kFusion, operand2->opcode());
EXPECT_EQ(7, operand2->fused_instruction_count());
}
TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) {
  // A huge multi-operand producer with two reduce consumers; merging is
  // expected to be rejected.
  constexpr char kHloText[] = R"(
HloModule m
f32add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT _ = f32[] add(x, y)
}
comp0 {
p = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
gte0 = f32[100000000] get-tuple-element(p), index=0
gte1 = f32[100000000] get-tuple-element(p), index=1
add.9 = f32[100000000] add(gte0, gte1)
gte2 = f32[100000000] get-tuple-element(p), index=2
add.10 = f32[100000000] add(add.9, gte2)
gte3 = f32[100000000] get-tuple-element(p), index=3
add.11 = f32[100000000] add(add.10, gte3)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
gte4 = f32[100000000] get-tuple-element(p1), index=0
gte5 = f32[100000000] get-tuple-element(p1), index=1
add.12 = f32[100000000] add(gte4, gte5)
gte6 = f32[100000000] get-tuple-element(p1), index=2
add.13 = f32[100000000] add(add.12, gte6)
gte7 = f32[100000000] get-tuple-element(p1), index=3
add.14 = f32[100000000] add(add.13, gte7)
ROOT r = f32[100000000] add(add.14, add.11)
}
comp1 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
}
comp2 {
p = f32[100000000] parameter(0)
c0 = f32[] constant(0)
r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
ROOT n = f32[] negate(r)
}
ENTRY m.Computation2 {
p0 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(0)
p1 = (f32[100000000], f32[100000000], f32[100000000], f32[100000000]) parameter(1)
fusion.0 = f32[100000000] fusion(p0, p1), kind=kLoop, calls=comp0
fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1
fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2
ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2)
}
)";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, LessMemoryAccessIfFused) {
  // fusion.2 is shared by two small elementwise consumers; the merge is
  // expected to be accepted.
  constexpr char kHloText[] = R"(
HloModule m
comp.2 {
state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0
get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1
add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6)
get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2
ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY m.Computation2 {
constant = f32[4]{0} constant({1, 1, 1, 1})
state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2
fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1
fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4)
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, WillMergeIntoInputFusion) {
  // A loop-fusion producer feeding a kInput reduce fusion; after merging the
  // root must be a single fusion taking the entry parameter directly.
  constexpr char kHloText[] = R"(
HloModule m
f1_computation {
f1_p0 = f32[32]{0} parameter(0)
ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[32]{0} parameter(0)
f2_mul = f32[32]{0} multiply(f2_p0, f2_p0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[32]{0} parameter(0)
f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              GmockMatch(m::Fusion(m::Parameter())));
}
TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) {
  // fusion.3 feeds both an unfused reduce and fusion.1; after the merger the
  // root must be a fusion whose operands are a fusion plus the two entry
  // parameters.
  constexpr char kHloText[] = R"(
HloModule jit_matmul.36
max (parameter.13: f32[], parameter.14: f32[]) -> f32[] {
parameter.13 = f32[] parameter(0)
parameter.14 = f32[] parameter(1)
ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14)
}
add (parameter.29: f32[], parameter.30: f32[]) -> f32[] {
parameter.29 = f32[] parameter(0)
parameter.30 = f32[] parameter(1)
ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30)
}
fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] {
param_1.4 = f32[200,200,200]{2,1,0} parameter(0)
param_2.1 = f32[200,200]{1,0} parameter(1)
broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2}
subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3)
exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0)
constant.27 = f32[] constant(0)
ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add
}
fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] {
param_1.9 = f32[200,200]{1,0} parameter(1)
broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1}
param_0.7 = f32[200,200]{1,0} parameter(0)
broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2}
ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8)
}
ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] {
parameter.2 = f32[200,200]{1,0} parameter(1)
parameter.1 = f32[200,200]{1,0} parameter(0)
fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3
constant.11 = f32[] constant(-inf)
reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max
ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      hlo_module->entry_computation()->root_instruction(),
      GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter())));
}
TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) {
  // The producer fusion ends in a physical-layout-changing copy
  // ({0,1,2} -> {2,1,0}); merging it into the reduce-input consumer is
  // expected to be rejected.
  constexpr char kHloText[] = R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0)
ROOT f1_root = f32[16,16,256]{2,1,0} copy(add)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) {
  // Only one concatenate operand goes through a layout-changing copy; the
  // other path is a bitcast. The merger is expected to accept this merge.
  constexpr char kHloText[] = R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]}
f1_copy = f32[5,16,256]{2,1,0} copy(slice1)
slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]}
bitcast = f32[11,16,256]{2,1,0} bitcast(slice2)
ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0}
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
TEST_F(FusionMergerTest, AvoidsLargeFusion) {
  // Builds one fusion with the maximum allowed number of operands and a
  // second fusion consuming it; merging would push the consumer over
  // MaxOperandsAndOutputsPerFusion(), so no change is expected.
  constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder builder(TestName());
  const Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
  std::vector<HloInstruction*> entry_params;
  for (int64_t i = 0; i < kNumParams; ++i) {
    entry_params.push_back(
        builder.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
  }
  // Creates a kLoop fusion computation that sums all of `params`.
  auto build_sum_fusion = [&](absl::Span<HloInstruction* const> params) {
    HloComputation::Builder sub_builder("subcomp");
    HloInstruction* acc = nullptr;
    for (int64_t i = 0; i < params.size(); ++i) {
      HloInstruction* leaf = sub_builder.AddInstruction(
          HloInstruction::CreateParameter(i, shape, "p"));
      acc = (acc == nullptr)
                ? leaf
                : sub_builder.AddInstruction(HloInstruction::CreateBinary(
                      shape, HloOpcode::kAdd, acc, leaf));
    }
    HloComputation* subcomp =
        module->AddEmbeddedComputation(sub_builder.Build());
    return HloInstruction::CreateFusion(
        shape, HloInstruction::FusionKind::kLoop, params, subcomp);
  };
  auto fusion = builder.AddInstruction(build_sum_fusion(
      absl::MakeSpan(entry_params)
          .subspan(0, MaxOperandsAndOutputsPerFusion())));
  builder.AddInstruction(build_sum_fusion({entry_params.back(), fusion}));
  module->AddEntryComputation(builder.Build());
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) {
  // Two chained slice/add reduction ladders; merging them is expected to be
  // rejected.
  constexpr char kHloText[] = R"(
HloModule m
f1 {
Arg_0.5 = f32[200000] parameter(0)
slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]}
slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]}
add.9 = f32[100000] add(slice.7, slice.8)
slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]}
slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]}
add.12 = f32[50000] add(slice.10, slice.11)
slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]}
slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]}
add.15 = f32[25000] add(slice.13, slice.14)
slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]}
slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]}
add.18 = f32[12500] add(slice.16, slice.17)
slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]}
slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]}
add.21 = f32[6250] add(slice.19, slice.20)
slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]}
slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]}
ROOT add.24 = f32[3125] add(slice.22, slice.23)
}
f2 {
Arg_0 = f32[3125] parameter(0)
slice.25 = f32[1562] slice(Arg_0), slice={[0:3124:2]}
slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]}
add.27 = f32[1562] add(slice.25, slice.26)
slice.28 = f32[781] slice(add.27), slice={[0:1561:2]}
slice.29 = f32[781] slice(add.27), slice={[1:1562:2]}
add.30 = f32[781] add(slice.28, slice.29)
slice.31 = f32[390] slice(add.30), slice={[0:780:2]}
slice.32 = f32[390] slice(add.30), slice={[1:781:2]}
add.33 = f32[390] add(slice.31, slice.32)
slice.34 = f32[195] slice(add.33), slice={[0:389:2]}
slice.35 = f32[195] slice(add.33), slice={[1:390:2]}
add.36 = f32[195] add(slice.34, slice.35)
slice.37 = f32[97] slice(add.36), slice={[0:194:2]}
slice.38 = f32[97] slice(add.36), slice={[1:195:2]}
add.39 = f32[97] add(slice.37, slice.38)
slice.40 = f32[48] slice(add.39), slice={[0:96:2]}
slice.41 = f32[48] slice(add.39), slice={[1:97:2]}
ROOT add.42 = f32[48] add(slice.40, slice.41)
}
ENTRY e {
p0 = f32[200000] parameter(0)
f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1
ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_FALSE(changed);
}
TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) {
  // A tiny slice producer feeding a broadcast consumer; the merge is
  // expected to happen.
  constexpr char kHloText[] = R"(
HloModule m
f1 {
p01 = s8[1000000] parameter(0)
ROOT s0 = s8[10] slice(p01), slice={[0:10]}
}
f2 {
p02 = s8[10] parameter(0)
ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0}
}
ENTRY e {
p0 = s8[1000000] parameter(0)
f1 = s8[10] fusion(p0), kind=kLoop, calls=f1
ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2
})";
  auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();
  const bool changed = fusion_merger_.Run(hlo_module.get()).value();
  EXPECT_TRUE(changed);
}
// Even though f_a is expensive (tanh), merging it into both consumers avoids
// materializing the large intermediate, so the merger should fire.
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_a (p: f32[]) -> f32[1024,1024,1024] {
%p = f32[] parameter(0)
%b = f32[1024,1024,1024] broadcast(%p), dimensions={}
ROOT %t = f32[1024,1024,1024] tanh(%b)
}
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
ENTRY entry {
p0 = f32[] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a
f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b
f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
ROOT f4 = f32[1024,1024,1024] add(f2, f3)
})")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
// A single-consumer producer is merged even when it is expensive: nothing is
// recomputed, the fusions are simply combined.
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] add(%p, %p)
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
})")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
// The consumer broadcasts (reuses) the producer's output 2048 times; merging
// would recompute the long tanh chain per use, so the merger must decline.
TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b {
%p = f32[1024,1024,1024] parameter(0)
%t1 = f32[1024,1024,1024] tanh(%p)
%t2 = f32[1024,1024,1024] tanh(%t1)
%t3 = f32[1024,1024,1024] tanh(%t2)
%t4 = f32[1024,1024,1024] tanh(%t3)
%t5 = f32[1024,1024,1024] tanh(%t4)
%t6 = f32[1024,1024,1024] tanh(%t5)
%t7 = f32[1024,1024,1024] tanh(%t6)
%t8 = f32[1024,1024,1024] tanh(%t7)
ROOT %t9 = f32[1024,1024,1024] tanh(%t8)
}
%f_c {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2}
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c
})")
                    .value();
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
// fusion.105 feeds fusion.103 both directly and through a bitcast into the
// reduce fusion.104; the merger should not merge across this pattern.
TEST_F(FusionMergerTest, NoMergeWithBitcast) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.103 {
param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.420 = f32[8,512]{1,0} parameter(1)
bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420)
convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144)
bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252)
broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2}
divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481)
ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15)
}
fused_computation.105 {
param_1.426 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426)
transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2}
param_0.315 = f16[8,512]{1,0} parameter(0)
broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2}
subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482)
ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22)
}
fused_computation.104 {
param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0)
convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000)
constant_752 = f32[] constant(-0)
ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752),
dimensions={1}, to_apply=f32add
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105
bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105)
fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104
ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103
}
)")
                    .value();
  EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
// Same softmax-like shape as NoMergeWithBitcast but without the intervening
// bitcast: here the cost model decides the merge is profitable.
TEST_F(FusionMergerTest, CostBasedMerge) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.45 {
param_1.194 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194)
param_0.135 = f16[8,512]{1,0} parameter(0)
broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2}
subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391)
ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6)
}
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.44 {
param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869)
transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2}
bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212)
constant_429 = f32[] constant(0)
ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add
}
fused_computation.43 {
param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.188 = f32[8,512]{1,0} parameter(1)
bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188)
convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040)
bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220)
broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2}
divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390)
ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11)
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45
fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44
ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43
}
)")
                    .value();
  EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
#include "internal/proto_time_encoding.h"
#include <string>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/util/time_util.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "internal/status_macros.h"
#include "internal/time.h"
namespace cel::internal {
namespace {
// Checks that `time` lies within the CEL-representable timestamp range.
absl::Status Validate(absl::Time time) {
  const absl::Time lower_bound = cel::internal::MinTimestamp();
  const absl::Time upper_bound = cel::internal::MaxTimestamp();
  if (time < lower_bound) return absl::InvalidArgumentError("time below min");
  if (time > upper_bound) return absl::InvalidArgumentError("time above max");
  return absl::OkStatus();
}
// Checks that `duration` lies within the CEL-representable duration range.
absl::Status CelValidateDuration(absl::Duration duration) {
  const absl::Duration lower_bound = cel::internal::MinDuration();
  const absl::Duration upper_bound = cel::internal::MaxDuration();
  if (duration < lower_bound) {
    return absl::InvalidArgumentError("duration below min");
  }
  if (duration > upper_bound) {
    return absl::InvalidArgumentError("duration above max");
  }
  return absl::OkStatus();
}
}
// Converts a protobuf Duration into the equivalent absl::Duration.
absl::Duration DecodeDuration(const google::protobuf::Duration& proto) {
  const absl::Duration whole_seconds = absl::Seconds(proto.seconds());
  const absl::Duration sub_second = absl::Nanoseconds(proto.nanos());
  return whole_seconds + sub_second;
}
// Converts a protobuf Timestamp into the equivalent absl::Time.
absl::Time DecodeTime(const google::protobuf::Timestamp& proto) {
  const absl::Time whole_seconds = absl::FromUnixSeconds(proto.seconds());
  return whole_seconds + absl::Nanoseconds(proto.nanos());
}
// Encodes `duration` into `proto` after validating it against the CEL
// duration range. Returns InvalidArgumentError if out of range.
absl::Status EncodeDuration(absl::Duration duration,
                            google::protobuf::Duration* proto) {
  CEL_RETURN_IF_ERROR(CelValidateDuration(duration));
  // IDivDuration returns the whole quotient and stores the remainder back
  // into `duration`, so `s` is whole seconds and `n` the leftover nanos.
  const int64_t s = absl::IDivDuration(duration, absl::Seconds(1), &duration);
  const int64_t n =
      absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);
  proto->set_seconds(s);
  proto->set_nanos(n);
  return absl::OkStatus();
}
// Encodes `duration` as a protobuf Duration and renders it in its canonical
// string form (e.g. "5.000000020s"); forwards any validation error.
absl::StatusOr<std::string> EncodeDurationToString(absl::Duration duration) {
  google::protobuf::Duration proto;
  if (absl::Status status = EncodeDuration(duration, &proto); !status.ok()) {
    return status;
  }
  return google::protobuf::util::TimeUtil::ToString(proto);
}
// Encodes `time` into `proto` after validating it against the CEL timestamp
// range. Returns InvalidArgumentError if out of range.
absl::Status EncodeTime(absl::Time time, google::protobuf::Timestamp* proto) {
  CEL_RETURN_IF_ERROR(Validate(time));
  const int64_t s = absl::ToUnixSeconds(time);
  proto->set_seconds(s);
  // Remaining sub-second part of `time`, expressed as a count of nanoseconds.
  proto->set_nanos((time - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1));
  return absl::OkStatus();
}
// Encodes `time` as a protobuf Timestamp and renders it in its canonical
// string form (RFC 3339); forwards any validation error.
absl::StatusOr<std::string> EncodeTimeToString(absl::Time time) {
  google::protobuf::Timestamp proto;
  if (absl::Status status = EncodeTime(time, &proto); !status.ok()) {
    return status;
  }
  return google::protobuf::util::TimeUtil::ToString(proto);
}
} | #include "internal/proto_time_encoding.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "absl/time/time.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace cel::internal {
namespace {
using ::google::api::expr::testutil::EqualsProto;
// Seconds and nanos are split into the proto's two fields.
TEST(EncodeDuration, Basic) {
  google::protobuf::Duration proto_duration;
  ASSERT_OK(
      EncodeDuration(absl::Seconds(2) + absl::Nanoseconds(3), &proto_duration));
  EXPECT_THAT(proto_duration, EqualsProto("seconds: 2 nanos: 3"));
}
// String rendering uses the "<seconds>.<nanos>s" form.
TEST(EncodeDurationToString, Basic) {
  ASSERT_OK_AND_ASSIGN(
      std::string json,
      EncodeDurationToString(absl::Seconds(5) + absl::Nanoseconds(20)));
  EXPECT_EQ(json, "5.000000020s");
}
// A time landing exactly on a second boundary leaves nanos at zero.
TEST(EncodeTime, Basic) {
  google::protobuf::Timestamp proto_timestamp;
  ASSERT_OK(EncodeTime(absl::FromUnixMillis(300000), &proto_timestamp));
  EXPECT_THAT(proto_timestamp, EqualsProto("seconds: 300"));
}
// Timestamps render as RFC 3339 strings.
TEST(EncodeTimeToString, Basic) {
  ASSERT_OK_AND_ASSIGN(std::string json,
                       EncodeTimeToString(absl::FromUnixMillis(80030)));
  EXPECT_EQ(json, "1970-01-01T00:01:20.030Z");
}
// Decoding recombines the seconds and nanos fields.
TEST(DecodeDuration, Basic) {
  google::protobuf::Duration proto_duration;
  proto_duration.set_seconds(450);
  proto_duration.set_nanos(4);
  EXPECT_EQ(DecodeDuration(proto_duration),
            absl::Seconds(450) + absl::Nanoseconds(4));
}
// A timestamp with only whole seconds decodes exactly.
TEST(DecodeTime, Basic) {
  google::protobuf::Timestamp proto_timestamp;
  proto_timestamp.set_seconds(450);
  EXPECT_EQ(DecodeTime(proto_timestamp), absl::FromUnixSeconds(450));
}
}
} | absl::Duration DecodeDuration(const google::protobuf::Duration& proto) {
return absl::Seconds(proto.seconds()) + absl::Nanoseconds(proto.nanos());
} | TEST(DecodeDuration, Basic) {
google::protobuf::Duration proto_duration;
proto_duration.set_seconds(450);
proto_duration.set_nanos(4);
EXPECT_EQ(DecodeDuration(proto_duration),
absl::Seconds(450) + absl::Nanoseconds(4));
} |
#include <math.h>
#include <stddef.h>
#include <stdlib.h>
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/dequantize.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace numeric_verify {
static constexpr const char kToleranceStr[] = "tolerance";
static constexpr const char kLogIfFailedStr[] = "log_if_failed";
static constexpr const int kTemporaryDequantizedTensor = 0;
static constexpr const int kOutputTensor = 0;
// Bundles the tensors this op touches: input 0 is the quantized tensor under
// test, input 1 the float32 reference, output 0 the per-element difference.
struct OpContext {
  OpContext(TfLiteContext* context, TfLiteNode* node) {
    input = GetInput(context, node, 0);
    ref = GetInput(context, node, 1);
    output = GetOutput(context, node, 0);
  }
  const TfLiteTensor* input;  // Quantized (or float16) tensor to verify.
  const TfLiteTensor* ref;    // Float32 reference values.
  TfLiteTensor* output;       // Receives dequantized - reference per element.
};
// Sentinel meaning the scratch (dequantized) tensor has not been allocated.
const int kTensorNotAllocated = -1;
// Per-node state parsed from the custom options, kept across invocations.
struct OpData {
  // Max allowed |dequantized - reference|, in units of the input's scale.
  float tolerance;
  // True once a constant input has been dequantized and verified.
  bool float_input_initialized;
  int cache_tensor_id = kTensorNotAllocated;  // Scratch dequantized tensor.
  // If true (and tolerance >= 0.1), log and fail on the first breach;
  // otherwise only per-element diffs and summary stats are produced.
  bool log_if_failed;
};
// Parses the flexbuffer custom options ("tolerance", "log_if_failed") into a
// heap-allocated OpData; ownership passes to the runtime (released in Free).
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new OpData();
  op_data->float_input_initialized = false;
  const auto* options = reinterpret_cast<const uint8_t*>(buffer);
  const flexbuffers::Map& m = flexbuffers::GetRoot(options, length).AsMap();
  op_data->tolerance = m[kToleranceStr].AsFloat();
  op_data->log_if_failed = m[kLogIfFailedStr].AsBool();
  return op_data;
}
// Releases the OpData allocated in Init. `buffer` is the pointer Init
// returned; static_cast is the correct named cast for a void* round-trip
// (reinterpret_cast was unnecessarily strong here).
void Free(TfLiteContext* context, void* buffer) {
  delete static_cast<OpData*>(buffer);
}
// Validates tensor types, sets up the temporary tensor that will hold the
// dequantized input, and sizes the float32 output to match the input shape.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  OpContext op_context(context, node);
  // Only quantized integer inputs (plus float16) can be verified against a
  // float32 reference.
  TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
                              op_context.input->type == kTfLiteInt8 ||
                              op_context.input->type == kTfLiteInt16 ||
                              op_context.input->type == kTfLiteFloat16);
  TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
  // Allocate the scratch tensor id once; reuse on subsequent Prepare calls.
  if (op_data->cache_tensor_id == kTensorNotAllocated) {
    TF_LITE_ENSURE_OK(
        context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
  }
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(1);
  node->temporaries->data[0] = op_data->cache_tensor_id;
  TfLiteTensor* dequantized;
  TF_LITE_ENSURE_OK(context,
                    GetTemporarySafe(context, node, kTemporaryDequantizedTensor,
                                     &dequantized));
  // The scratch tensor mirrors the reference type (float32) and input shape.
  dequantized->type = op_context.ref->type;
  dequantized->allocation_type = kTfLiteDynamic;
  TF_LITE_ENSURE_OK(context, context->ResizeTensor(
                                 context, dequantized,
                                 TfLiteIntArrayCopy(op_context.input->dims)));
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputTensor, &op_context.output));
  // Persistent arena allocation keeps the diff output alive between invokes.
  op_context.output->type = kTfLiteFloat32;
  op_context.output->allocation_type = kTfLiteArenaRwPersistent;
  return context->ResizeTensor(context, op_context.output,
                               TfLiteIntArrayCopy(op_context.input->dims));
}
static int32_t GetQuantizedValue(const OpContext& op_context, int index) {
switch (op_context.input->type) {
case kTfLiteUInt8:
return GetTensorData<uint8_t>(op_context.input)[index];
case kTfLiteInt8:
return GetTensorData<int8_t>(op_context.input)[index];
case kTfLiteInt16:
return GetTensorData<int16_t>(op_context.input)[index];
default:
return 0;
}
}
// Dequantizes the input with the selected kernel and compares it against the
// float32 reference. In strict mode (log_if_failed && tolerance >= 0.1) it
// fails on the first element whose error exceeds tolerance * scale; otherwise
// it records per-element diffs and logs mean / stddev / max statistics.
template <builtin::dequantize::KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  OpContext op_context(context, node);
  // A constant input only needs to be dequantized and checked once.
  if (IsConstantTensor(op_context.input) && op_data->float_input_initialized) {
    return kTfLiteOk;
  }
  TfLiteTensor* dequantized;
  TF_LITE_ENSURE_OK(context,
                    GetTemporarySafe(context, node, kTemporaryDequantizedTensor,
                                     &dequantized));
  auto status = builtin::dequantize::DequantizeImpl<kernel_type>(
      context, node, op_context.input, dequantized);
  if (status != kTfLiteOk) {
    return status;
  }
  if (IsConstantTensor(op_context.input)) {
    op_data->float_input_initialized = true;
  }
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, kOutputTensor, &op_context.output));
  auto output_data = GetTensorData<float>(op_context.output);
  const int n = NumElements(dequantized);
  if (op_data->log_if_failed && op_data->tolerance >= 0.1) {
    // Strict mode: tolerance is measured in quantization steps (scale units).
    auto max_diff = op_data->tolerance * op_context.input->params.scale;
    for (int i = 0; i < n; ++i) {
      int32_t value = GetQuantizedValue(op_context, i);
      float dequant = GetTensorData<float>(dequantized)[i];
      float reference = GetTensorData<float>(op_context.ref)[i];
      output_data[i] = dequant - reference;
      float diff = std::abs(output_data[i]);
      if (diff > max_diff) {
        TF_LITE_KERNEL_LOG(
            context,
            "Mismatch: %f is quantized to %d with (%f, %d). "
            "abs(%f - %f) = %f > %f (tolerance) range percentage %f.\n",
            reference, value, op_context.input->params.scale,
            op_context.input->params.zero_point, reference, dequant, diff,
            max_diff, op_data->tolerance);
        return kTfLiteError;
      }
    }
  } else {
    // Debug mode: collect diffs and report summary statistics, never fail.
    std::vector<double> diffs, temp;
    diffs.reserve(n);
    temp.reserve(n);
    diffs.resize(n);
    temp.resize(n);
    for (int i = 0; i < n; ++i) {
      float dequant = GetTensorData<float>(dequantized)[i];
      float reference = GetTensorData<float>(op_context.ref)[i];
      diffs[i] = static_cast<double>(dequant - reference);
      output_data[i] = dequant - reference;
    }
    double mean =
        std::accumulate(diffs.begin(), diffs.end(), 0.0) / diffs.size();
    double max_diff = 0.0;
    // Center the diffs around the mean, tracking the max |diff| on the way.
    std::transform(diffs.begin(), diffs.end(), temp.begin(),
                   [mean, &max_diff](double x) {
                     max_diff = std::max(max_diff, std::abs(x));
                     return x - mean;
                   });
    double sq_sum =
        std::inner_product(temp.begin(), temp.end(), temp.begin(), 0.0);
    double std = std::sqrt(sq_sum / diffs.size());
    TF_LITE_KERNEL_LOG(
        context,
        "std: %f, mean: %f, max_diff: %f (scale: %f, zero_point: %d).\n", std,
        mean, max_diff, op_context.input->params.scale,
        op_context.input->params.zero_point);
  }
  return kTfLiteOk;
}
}
// Registration using the optimized dequantize implementation.
TfLiteRegistration* Register_NUMERIC_VERIFY_OPT() {
  static TfLiteRegistration r = {
      numeric_verify::Init, numeric_verify::Free, numeric_verify::Prepare,
      numeric_verify::Eval<builtin::dequantize::kGenericOptimized>};
  return &r;
}
// Registration using the reference dequantize implementation.
TfLiteRegistration* Register_NUMERIC_VERIFY_REF() {
  static TfLiteRegistration r = {
      numeric_verify::Init, numeric_verify::Free, numeric_verify::Prepare,
      numeric_verify::Eval<builtin::dequantize::kReference>};
  return &r;
}
// Picks the optimized kernel when built with NEON, else the reference one.
TfLiteRegistration* Register_NUMERIC_VERIFY() {
#ifdef USE_NEON
  return Register_NUMERIC_VERIFY_OPT();
#else
  return Register_NUMERIC_VERIFY_REF();
#endif
}
}
}
} | #include <string.h>
#include <cstdint>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_NUMERIC_VERIFY();
}
}
namespace {
// Test helper wiring up a single NUMERIC_VERIFY custom op with a quantized
// input, a float32 reference input, and a float32 per-element diff output.
// Fix: removed the dead local `std::vector<uint8_t> custom_options(...)`
// which was never used (options are serialized via the flexbuffer builder).
class NumericVerifyOpModel : public SingleOpModel {
 public:
  // `version` is accepted for parity with related dequantize tests but is not
  // consumed here.
  NumericVerifyOpModel(TensorType type, std::initializer_list<int> shape,
                       float scale, int32_t zero_point, int version,
                       float tolerance = 5.0, bool log_if_failed = true) {
    const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
    input_ = AddInput(input_tensor_data);
    ref_ = AddInput({TensorType_FLOAT32, shape});
    output_ = AddOutput({TensorType_FLOAT32, shape});
    // Custom options travel as a flexbuffer map of {tolerance, log_if_failed}.
    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      fbb.Float("tolerance", tolerance);
      fbb.Bool("log_if_failed", log_if_failed);
    });
    fbb.Finish();
    SetCustomOp("NUMERIC_VERIFY", fbb.GetBuffer(),
                ops::custom::Register_NUMERIC_VERIFY);
    BuildInterpreter({GetShape(input_), GetShape(ref_)});
  }
  // Populates the quantized input and the float32 reference tensor.
  template <typename T>
  void SetInputs(std::initializer_list<T> data,
                 std::initializer_list<float> ref_data) {
    PopulateTensor(input_, data);
    PopulateTensor(ref_, ref_data);
  }
  // Returns the per-element (dequantized - reference) diffs.
  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }

 private:
  int input_;
  int ref_;
  int output_;
};
// uint8 values dequantizing exactly to the reference pass verification.
TEST(NumericVerifyOpTest, Uint8) {
  NumericVerifyOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
  m.SetInputs<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255},
                       {-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
  EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
// Same for int8 with a nonzero zero point.
TEST(NumericVerifyOpTest, Int8) {
  NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
  m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127},
                      {-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
  EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
// Float16 inputs are compared against their float32 counterparts.
TEST(NumericVerifyOpTest, Float16) {
  NumericVerifyOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3,
                         0.1f);
  std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
                                Eigen::half{-1.0f},    Eigen::half{0.f},
                                Eigen::half{1.0f},     Eigen::half{100.32f}};
  m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
                   reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
  m.PopulateTensor(1, {-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f});
  EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
// Same for int16.
TEST(NumericVerifyOpTest, Int16) {
  NumericVerifyOpModel m(TensorType_INT16, {2, 5}, 0.5, -1, 4);
  m.SetInputs<int16_t>(
      {-130, -127, -126, -125, -124, 123, 124, 125, 126, 130},
      {-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5});
  EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
// One element (0 vs 62.0) breaches the tolerance, so Invoke fails.
TEST(NumericVerifyOpFailedTest, Int8) {
  NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
  m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
                      {-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
  EXPECT_EQ(m.Invoke(), kTfLiteError);
}
// With log_if_failed=false the op succeeds and exposes per-element diffs.
TEST(NumericVerifyOpDebugModeTest, Int8) {
  NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2, 5.0, false);
  m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
                      {-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
  EXPECT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(
      m.GetOutput(),
      ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, -61.5, 0, 0, 0, 0})));
}
}
} | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32);
if (op_data->cache_tensor_id == kTensorNotAllocated) {
TF_LITE_ENSURE_OK(
context, context->AddTensors(context, 1, &op_data->cache_tensor_id));
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;
TfLiteTensor* dequantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kTemporaryDequantizedTensor,
&dequantized));
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(
context, dequantized,
TfLiteIntArrayCopy(op_context.input->dims)));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kOutputTensor, &op_context.output));
op_context.output->type = kTfLiteFloat32;
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
} | TEST(NumericVerifyOpTest, Uint8) {
NumericVerifyOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInputs<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Float16) {
NumericVerifyOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3,
0.1f);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
m.PopulateTensor(1, {-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpTest, Int16) {
NumericVerifyOpModel m(TensorType_INT16, {2, 5}, 0.5, -1, 4);
m.SetInputs<int16_t>(
{-130, -127, -126, -125, -124, 123, 124, 125, 126, 130},
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
}
TEST(NumericVerifyOpFailedTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(NumericVerifyOpDebugModeTest, Int8) {
NumericVerifyOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2, 5.0, false);
m.SetInputs<int8_t>({-128, -127, -126, -125, -124, 0, 124, 125, 126, 127},
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64});
EXPECT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0, 0, -61.5, 0, 0, 0, 0})));
} |
#include "quiche/balsa/balsa_headers_sequence.h"
#include <memory>
#include <utility>
#include "quiche/balsa/balsa_headers.h"
namespace quiche {
// Takes ownership of `headers` and places them at the back of the sequence.
void BalsaHeadersSequence::Append(std::unique_ptr<BalsaHeaders> headers) {
  sequence_.emplace_back(std::move(headers));
}
// Returns true while at least one headers object has not been consumed yet.
bool BalsaHeadersSequence::HasNext() const {
  return next_ < sequence_.size();
}
// Returns the next headers without consuming them, or nullptr if exhausted.
BalsaHeaders* BalsaHeadersSequence::PeekNext() {
  return HasNext() ? sequence_[next_].get() : nullptr;
}
// Returns the next headers and advances the cursor, or nullptr if exhausted.
BalsaHeaders* BalsaHeadersSequence::Next() {
  if (!HasNext()) {
    return nullptr;
  }
  BalsaHeaders* headers = sequence_[next_].get();
  ++next_;
  return headers;
}
// Drops all owned headers and rewinds the cursor to the beginning.
void BalsaHeadersSequence::Clear() {
  next_ = 0;
  sequence_.clear();
}
} | #include "quiche/balsa/balsa_headers_sequence.h"
#include <memory>
#include <utility>
#include "quiche/balsa/balsa_headers.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
// An empty sequence reports no next element and yields nullptr.
TEST(BalsaHeadersSequenceTest, Initial) {
  BalsaHeadersSequence sequence;
  EXPECT_FALSE(sequence.HasNext());
  EXPECT_EQ(sequence.Next(), nullptr);
  EXPECT_TRUE(sequence.IsEmpty());
}
// Appended headers come back in FIFO order; IsEmpty stays false even after
// the sequence has been fully consumed.
TEST(BalsaHeadersSequenceTest, Basic) {
  BalsaHeadersSequence sequence;
  auto headers_one = std::make_unique<BalsaHeaders>();
  headers_one->AppendHeader("one", "fish");
  sequence.Append(std::move(headers_one));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  auto headers_two = std::make_unique<BalsaHeaders>();
  headers_two->AppendHeader("two", "fish");
  sequence.Append(std::move(headers_two));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  const BalsaHeaders* headers = sequence.Next();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("one"));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  headers = sequence.Next();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("two"));
  EXPECT_FALSE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  EXPECT_EQ(sequence.Next(), nullptr);
}
// Clear drops all entries and resets the sequence to its initial state.
TEST(BalsaHeadersSequenceTest, Clear) {
  BalsaHeadersSequence sequence;
  auto headers_one = std::make_unique<BalsaHeaders>();
  headers_one->AppendHeader("one", "fish");
  sequence.Append(std::move(headers_one));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  auto headers_two = std::make_unique<BalsaHeaders>();
  headers_two->AppendHeader("two", "fish");
  sequence.Append(std::move(headers_two));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_FALSE(sequence.IsEmpty());
  sequence.Clear();
  EXPECT_FALSE(sequence.HasNext());
  EXPECT_EQ(sequence.Next(), nullptr);
  EXPECT_TRUE(sequence.IsEmpty());
}
// PeekNext never advances the cursor and tracks the element Next will return.
TEST(BalsaHeadersSequenceTest, PeekNext) {
  BalsaHeadersSequence sequence;
  EXPECT_EQ(sequence.PeekNext(), nullptr);
  auto headers_one = std::make_unique<BalsaHeaders>();
  headers_one->AppendHeader("one", "fish");
  sequence.Append(std::move(headers_one));
  EXPECT_TRUE(sequence.HasNext());
  const BalsaHeaders* headers = sequence.PeekNext();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("one"));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_EQ(sequence.PeekNext(), headers);
  auto headers_two = std::make_unique<BalsaHeaders>();
  headers_two->AppendHeader("two", "fish");
  sequence.Append(std::move(headers_two));
  EXPECT_TRUE(sequence.HasNext());
  EXPECT_EQ(sequence.PeekNext(), headers);
  headers = sequence.Next();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("one"));
  EXPECT_TRUE(sequence.HasNext());
  headers = sequence.PeekNext();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("two"));
  EXPECT_TRUE(sequence.HasNext());
  headers = sequence.Next();
  ASSERT_NE(headers, nullptr);
  EXPECT_TRUE(headers->HasHeader("two"));
  EXPECT_FALSE(sequence.HasNext());
  EXPECT_EQ(sequence.PeekNext(), nullptr);
}
// The pointer handed out by Next remains the object that was appended.
TEST(BalsaHeadersSequenceTest, CanRetainValidReference) {
  BalsaHeadersSequence sequence;
  auto headers = std::make_unique<BalsaHeaders>();
  headers->AppendHeader("one", "fish");
  BalsaHeaders* headers_ptr = headers.get();
  sequence.Append(std::move(headers));
  ASSERT_TRUE(sequence.HasNext());
  EXPECT_EQ(sequence.Next(), headers_ptr);
}
}
}
} | BalsaHeaders* BalsaHeadersSequence::PeekNext() {
if (!HasNext()) {
return nullptr;
}
return sequence_[next_].get();
} | TEST(BalsaHeadersSequenceTest, PeekNext) {
BalsaHeadersSequence sequence;
EXPECT_EQ(sequence.PeekNext(), nullptr);
auto headers_one = std::make_unique<BalsaHeaders>();
headers_one->AppendHeader("one", "fish");
sequence.Append(std::move(headers_one));
EXPECT_TRUE(sequence.HasNext());
const BalsaHeaders* headers = sequence.PeekNext();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("one"));
EXPECT_TRUE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), headers);
auto headers_two = std::make_unique<BalsaHeaders>();
headers_two->AppendHeader("two", "fish");
sequence.Append(std::move(headers_two));
EXPECT_TRUE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), headers);
headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("one"));
EXPECT_TRUE(sequence.HasNext());
headers = sequence.PeekNext();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("two"));
EXPECT_TRUE(sequence.HasNext());
headers = sequence.Next();
ASSERT_NE(headers, nullptr);
EXPECT_TRUE(headers->HasHeader("two"));
EXPECT_FALSE(sequence.HasNext());
EXPECT_EQ(sequence.PeekNext(), nullptr);
} |
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <memory>
#include <vector>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace mirror_pad {
namespace {
// Marker for a mirror offset that has not been configured yet.
const int kUnsetOffset = -1;
// Read-only state shared by the worker tasks that fill the padded output.
template <typename T>
struct EvalData {
  const TfLiteTensor* padding_matrix = nullptr;  // [num_dims, 2] paddings.
  const TfLiteIntArray* input_dims = nullptr;
  // Number of elements spanned by one step along each output/input dimension
  // (i.e. products of trailing dimension sizes), used for index arithmetic.
  const std::vector<int>* output_dims_num_elements = nullptr;
  const std::vector<int>* input_dims_num_elements = nullptr;
  const T* input_data = nullptr;
  // Mirror offset: 0 repeats the edge element, 1 excludes it (see
  // GetInputDimension). Set before evaluation; kUnsetOffset until then.
  int offset = kUnsetOffset;
  T* output_data = nullptr;
  int num_dims = 0;
};
// Reads the (left, right) padding pair for dimension `offset` out of the
// flattened [num_dims, 2] padding matrix `data`.
template <typename T>
inline void GetPadding(const T* data, int offset, int64_t* left_pad,
                       int64_t* right_pad) {
  const T* pair = data + 2 * offset;
  *left_pad = static_cast<int64_t>(pair[0]);
  *right_pad = static_cast<int64_t>(pair[1]);
}
// Reads the (left, right) padding pair for `dimension` from the padding
// matrix tensor, dispatching on its element type. Unsupported element types
// leave the outputs untouched.
inline void GetPadding(const TfLiteTensor* padding_matrix, int dimension,
                       int64_t* left_pad, int64_t* right_pad) {
  switch (padding_matrix->type) {
    case kTfLiteInt32:
      GetPadding(padding_matrix->data.i32, dimension, left_pad, right_pad);
      break;
    case kTfLiteInt64:
      GetPadding(padding_matrix->data.i64, dimension, left_pad, right_pad);
      break;
    default:
      return;
  }
}
// Computes the padded output shape: each input dimension grown by its left
// and right padding amounts. The returned TfLiteIntArray is owned via the
// custom deleter, so it is freed automatically.
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> GetPaddedOutputShape(
    const TfLiteTensor* input, const TfLiteTensor* padding_matrix) {
  const int input_dims = NumDimensions(input);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(input_dims), TfLiteIntArrayFree);
  int64_t left_pad = 0, right_pad = 0;
  for (int i = 0; i < input_dims; ++i) {
    GetPadding(padding_matrix, i, &left_pad, &right_pad);
    shape->data[i] = SizeOfDimension(input, i) + left_pad + right_pad;
  }
  return shape;
}
// Maps one output (padded) coordinate back to the mirrored input coordinate
// along a single dimension. `offset` is 0 for SYMMETRIC mode (edge element is
// repeated) and 1 for REFLECT mode (edge element is skipped).
inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad,
                             int input_dim_size, int offset) {
  if (padded_dimension < left_pad) {
    // Left padding region: mirror around the left edge.
    const int mirror_start = left_pad + offset - 1;
    return mirror_start - std::min(padded_dimension, mirror_start - offset);
  }
  const int core = padded_dimension - left_pad;
  if (core < input_dim_size) {
    // Inside the original data: identity mapping.
    return core;
  }
  // Right padding region: mirror around the right edge.
  const int past_end = core - input_dim_size;
  const int mirror_start = input_dim_size - (1 + offset);
  return mirror_start - std::min(past_end, mirror_start);
}
// Maps a flat index into the padded output tensor to the flat index of the
// mirrored source element in the input tensor, decomposing the output index
// dimension by dimension.
template <typename T>
int GetFlatIndex(int index, EvalData<T>* eval_data) {
  int flat_index = 0;
  int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input;
  for (int i = 0; i < eval_data->num_dims; ++i) {
    // Fetch this dimension's {pre, post} padding. The default case leaves the
    // previous values in place; presumably unsupported padding types are
    // rejected before evaluation — confirm against Prepare.
    switch (eval_data->padding_matrix->type) {
      case kTfLiteInt32:
        GetPadding(eval_data->padding_matrix->data.i32, i, &left_pad,
                   &right_pad);
        break;
      case kTfLiteInt64:
        GetPadding(eval_data->padding_matrix->data.i64, i, &left_pad,
                   &right_pad);
        break;
      default:
        break;
    }
    // Coordinate of `index` along output dimension i, then its mirrored
    // counterpart in the input.
    dimension_index = index / (*eval_data->output_dims_num_elements)[i];
    index_in_input =
        GetInputDimension(dimension_index, left_pad, right_pad,
                          eval_data->input_dims->data[i], eval_data->offset);
    flat_index += index_in_input * (*eval_data->input_dims_num_elements)[i];
    // Remainder carries the coordinates of the not-yet-processed dimensions.
    index %= (*eval_data->output_dims_num_elements)[i];
  }
  return flat_index;
}
// Threadpool task that fills the half-open output range [start, end) by
// copying each element from its mirrored input position.
template <typename T>
struct MirrorPadWorkerTask : cpu_backend_threadpool::Task {
  MirrorPadWorkerTask(EvalData<T>* eval_data, int start, int end)
      : eval_data(eval_data), start(start), end(end) {}
  void Run() override {
    auto* input_data = eval_data->input_data;
    auto* output_data = eval_data->output_data;
    for (int i = start; i < end; ++i) {
      output_data[i] = input_data[GetFlatIndex(i, eval_data)];
    }
  }
 private:
  EvalData<T>* eval_data;  // shared, read-only during Run
  int start;               // first output index (inclusive)
  int end;                 // last output index (exclusive)
};
}
// Evaluates MIRROR_PAD: resizes the output if it is dynamic, then fills it in
// parallel by mapping every output element back to its mirrored input source.
// Fix: the TF_LITE_MIRROR_PAD macro previously assigned
// `eval_data.input_dims = input_tensor->dims;` twice; the redundant duplicate
// assignment is removed.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  ruy::profiler::ScopeLabel label("MirrorPad");
  const TfLiteTensor* input_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
  const TfLiteTensor* padding_matrix;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
  auto* params =
      reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data);
  if (params == nullptr) {
    return kTfLiteError;
  }
  const int input_dims = NumDimensions(input_tensor);
  TfLiteTensor* output_tensor;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
  // If the padding was not constant at Prepare time, the output shape must be
  // computed and applied now.
  if (IsDynamicTensor(output_tensor)) {
    auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
    if (output_size == nullptr) {
      return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(
        context->ResizeTensor(context, output_tensor, output_size.release()));
  }
  // Per-dimension strides (in elements) for the output and input tensors.
  std::vector<int> output_dims_num_elements(input_dims, 1);
  std::vector<int> input_dims_num_elements(input_dims, 1);
  for (int i = input_dims - 2; i >= 0; i--) {
    output_dims_num_elements[i] =
        output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1];
    input_dims_num_elements[i] =
        input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1];
  }
  // REFLECT skips the edge element when mirroring; SYMMETRIC repeats it.
  const int offset =
      params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0
                                                                           : 1;
  CpuBackendContext* cpu_backend_context =
      CpuBackendContext::GetFromContext(context);
  const int thread_count = cpu_backend_context->max_num_threads();
  TfLiteStatus status = kTfLiteOk;
  const int output_size = NumElements(output_tensor);
  // Splits the flat output range evenly across the backend's worker threads;
  // each task maps its output indices back to input indices via GetFlatIndex.
#define TF_LITE_MIRROR_PAD(type)                                           \
  EvalData<type> eval_data;                                                \
  eval_data.input_data = GetTensorData<type>(input_tensor);                \
  eval_data.input_dims = input_tensor->dims;                               \
  eval_data.output_dims_num_elements = &output_dims_num_elements;          \
  eval_data.input_dims_num_elements = &input_dims_num_elements;            \
  eval_data.num_dims = input_dims;                                         \
  eval_data.offset = offset;                                               \
  eval_data.output_data = GetTensorData<type>(output_tensor);              \
  eval_data.padding_matrix = padding_matrix;                               \
  std::vector<MirrorPadWorkerTask<type>> tasks;                            \
  tasks.reserve(thread_count);                                             \
  int start = 0;                                                           \
  for (int i = 0; i < thread_count; ++i) {                                 \
    int end = start + (output_size - start) / (thread_count - i);          \
    tasks.emplace_back(MirrorPadWorkerTask<type>(&eval_data, start, end)); \
    start = end;                                                           \
  }                                                                        \
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),              \
                                  cpu_backend_context);
  switch (output_tensor->type) {
    case kTfLiteFloat32: {
      TF_LITE_MIRROR_PAD(float);
      break;
    }
    case kTfLiteInt32: {
      TF_LITE_MIRROR_PAD(int32_t);
      break;
    }
    case kTfLiteUInt8: {
      TF_LITE_MIRROR_PAD(uint8_t);
      break;
    }
    case kTfLiteInt8: {
      TF_LITE_MIRROR_PAD(int8_t);
      break;
    }
    case kTfLiteInt64: {
      TF_LITE_MIRROR_PAD(int64_t);
      break;
    }
    case kTfLiteInt16: {
      TF_LITE_MIRROR_PAD(int16_t);
      break;
    }
    default:
      status = kTfLiteError;
      break;
  }
#undef TF_LITE_MIRROR_PAD
  return status;
}
// No per-op state is allocated; all work happens in Prepare/Eval.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}
// Nothing to free: Init never allocates (see above — it returns nullptr).
void Free(TfLiteContext* context, void* buffer) {}
// Validates the inputs and sizes the output. The padding matrix must be a
// [num_dims, 2] tensor; quantized input/output must share scale and zero
// point, and int16 additionally requires a zero point of 0. When the padding
// is not a constant/persistent tensor, the output is marked dynamic and
// resizing is deferred to Eval.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
  const TfLiteTensor* padding_matrix;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
  TfLiteTensor* output_tensor;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
  // One {pre, post} pair per input dimension.
  TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
                    NumDimensions(input_tensor));
  // Mirror padding only rearranges values, so quantization params must match.
  if (input_tensor->type == kTfLiteUInt8 || input_tensor->type == kTfLiteInt8 ||
      input_tensor->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, input_tensor->params.scale,
                      output_tensor->params.scale);
    TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point,
                      output_tensor->params.zero_point);
  }
  if (input_tensor->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output_tensor->params.zero_point, 0);
  }
  if (!IsConstantOrPersistentTensor(padding_matrix)) {
    SetTensorToDynamic(output_tensor);
    return kTfLiteOk;
  }
  auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
  if (output_size == nullptr) {
    return kTfLiteError;
  }
  return context->ResizeTensor(context, output_tensor, output_size.release());
}
}
// Returns the singleton registration for the builtin MIRROR_PAD kernel.
TfLiteRegistration* Register_MIRROR_PAD() {
  static TfLiteRegistration r = {mirror_pad::Init, mirror_pad::Free,
                                 mirror_pad::Prepare, mirror_pad::Eval};
  return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
// Test harness: builds a single MIRROR_PAD op with a constant int32 padding
// matrix and runs it through the Hexagon delegate.
template <typename T>
class MirrorPadOpModel : public SingleOpModelWithHexagon {
 public:
  MirrorPadOpModel(const TensorData& input,
                   std::initializer_list<int> paddings_shape,
                   std::initializer_list<int> paddings,
                   const TensorData& output, const tflite::MirrorPadMode mode) {
    input_id_ = AddInput(input);
    // Padding is a const input so the output shape is static.
    padding_matrix_id_ =
        AddConstInput(TensorType_INT32, paddings, paddings_shape);
    output_id_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_MIRROR_PAD, BuiltinOptions_MirrorPadOptions,
                 CreateMirrorPadOptions(builder_, mode).Union());
    BuildInterpreter({GetShape(input_id_), GetShape(padding_matrix_id_)});
  }
  int input_tensor_id() { return input_id_; }
  std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }
 protected:
  int input_id_;
  int padding_matrix_id_;
  int output_id_;
};
// Zero padding on every side: output equals input.
TEST(MirrorPadTest, EmptyPad_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 0, 0, 0},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
// SYMMETRIC mode repeats the edge element when mirroring.
TEST(MirrorPadTest, PadBothSides_Symmetric_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
                                 {1, 1, 1, 1}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({1, 1, 2, 3, 3, 1, 1, 2, 3, 3,
                                4, 4, 5, 6, 6, 4, 4, 5, 6, 6}));
}
// REFLECT mode skips the edge element when mirroring.
TEST(MirrorPadTest, PadBothSides_Reflect_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 1, 1},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({5, 4, 5, 6, 5, 2, 1, 2, 3, 2,
                                5, 4, 5, 6, 5, 2, 1, 2, 3, 2}));
}
// Asymmetric padding: only the leading side of each dimension is padded.
TEST(MirrorPadTest, PadOneSide_left_Reflect_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
                                 {1, 0, 1, 0}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({5, 4, 5, 6, 2, 1, 2, 3, 5, 4, 5, 6}));
}
// Asymmetric padding: only the trailing side of each dimension is padded.
TEST(MirrorPadTest, PadOneSide_right_Symmetric_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 1, 0, 1},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({1, 2, 3, 3, 4, 5, 6, 6, 4, 5, 6, 6}));
}
// 1-D input, REFLECT.
TEST(MirrorPadTest, Pad_1D_Reflect_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {3}, -1.0, 1.0}, {1, 2},
                                 {0, 2}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 2, 1}));
}
// 1-D input, SYMMETRIC.
TEST(MirrorPadTest, Pad_1D_Symmetric_UInt8) {
  MirrorPadOpModel<uint8_t> model({TensorType_UINT8, {3}, -1.0, 1.0}, {1, 2},
                                  {0, 2}, {TensorType_UINT8, {}, -1.0, 1.0},
                                  tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 3, 2}));
}
// REFLECT with padding equal to dim-size - 1 (maximum legal reflect pad).
TEST(MirrorPadTest, PadBothSides_Reflect_Whole_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 2, 2},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1,
                                6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}));
}
} | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
const TfLiteTensor* padding_matrix;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
NumDimensions(input_tensor));
if (input_tensor->type == kTfLiteUInt8 || input_tensor->type == kTfLiteInt8 ||
input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.scale,
output_tensor->params.scale);
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point,
output_tensor->params.zero_point);
}
if (input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output_tensor->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(padding_matrix)) {
SetTensorToDynamic(output_tensor);
return kTfLiteOk;
}
auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
if (output_size == nullptr) {
return kTfLiteError;
}
return context->ResizeTensor(context, output_tensor, output_size.release());
} | TEST(MirrorPadTest, EmptyPad_UInt8) {
MirrorPadOpModel<uint8_t> model(
{TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 0, 0, 0},
{TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
// NOTE(review): the TEST definitions below are verbatim duplicates of TESTs
// that already appear earlier in this file (likely an extraction artifact).
// gtest's TEST macro defines a class per (suite, name) pair, so compiling
// both copies in one translation unit would be a redefinition error; the
// duplicates should be deleted.
TEST(MirrorPadTest, PadBothSides_Symmetric_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
                                 {1, 1, 1, 1}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({1, 1, 2, 3, 3, 1, 1, 2, 3, 3,
                                4, 4, 5, 6, 6, 4, 4, 5, 6, 6}));
}
TEST(MirrorPadTest, PadBothSides_Reflect_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 1, 1},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({5, 4, 5, 6, 5, 2, 1, 2, 3, 2,
                                5, 4, 5, 6, 5, 2, 1, 2, 3, 2}));
}
TEST(MirrorPadTest, PadOneSide_left_Reflect_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {2, 3}, -1.0, 1.0}, {2, 2},
                                 {1, 0, 1, 0}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({5, 4, 5, 6, 2, 1, 2, 3, 5, 4, 5, 6}));
}
TEST(MirrorPadTest, PadOneSide_right_Symmetric_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {0, 1, 0, 1},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({1, 2, 3, 3, 4, 5, 6, 6, 4, 5, 6, 6}));
}
TEST(MirrorPadTest, Pad_1D_Reflect_Int8) {
  MirrorPadOpModel<int8_t> model({TensorType_INT8, {3}, -1.0, 1.0}, {1, 2},
                                 {0, 2}, {TensorType_INT8, {}, -1.0, 1.0},
                                 tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<int8_t>(model.input_tensor_id(), {1, 2, 3});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 2, 1}));
}
TEST(MirrorPadTest, Pad_1D_Symmetric_UInt8) {
  MirrorPadOpModel<uint8_t> model({TensorType_UINT8, {3}, -1.0, 1.0}, {1, 2},
                                  {0, 2}, {TensorType_UINT8, {}, -1.0, 1.0},
                                  tflite::MirrorPadMode_SYMMETRIC);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 3, 2}));
}
TEST(MirrorPadTest, PadBothSides_Reflect_Whole_UInt8) {
  MirrorPadOpModel<uint8_t> model(
      {TensorType_UINT8, {2, 3}, -1.0, 1.0}, {2, 2}, {1, 1, 2, 2},
      {TensorType_UINT8, {}, -1.0, 1.0}, tflite::MirrorPadMode_REFLECT);
  model.PopulateTensor<uint8_t>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutput(),
              ElementsAreArray({6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1,
                                6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}));
}
#include "tensorstore/internal/metrics/prometheus.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_metrics {
namespace {
// ASCII character classes used to sanitize metric/label names and to decide
// when a label value must be base64-encoded in a Pushgateway URL.
static inline constexpr internal::AsciiSet kDigit{"0123456789"};
// Characters allowed as the first character of a metric name.
static inline constexpr internal::AsciiSet kMetricFirst{
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "_:"};
// Characters allowed as the first character of a label name.
static inline constexpr internal::AsciiSet kLabelFirst{
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "_"};
// Label-value characters that may appear in a URL path segment unencoded.
static inline constexpr internal::AsciiSet kValueUnreserved{
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "0123456789"
    "-_.~()"};
// A legal Prometheus label is non-empty, starts with [A-Za-z_], and contains
// only [A-Za-z0-9_] characters.
bool IsLegalPrometheusLabel(std::string_view label) {
  if (label.empty()) return false;
  if (!kLabelFirst.Test(label.front())) return false;
  for (size_t i = 0; i < label.size(); ++i) {
    const char ch = label[i];
    if (!kLabelFirst.Test(ch) && !kDigit.Test(ch)) return false;
  }
  return true;
}
// Appends "/<label>/<value>" to the Pushgateway URL. Empty values and values
// containing reserved characters use the "@base64" form defined by the
// Pushgateway API. Returns InvalidArgumentError for an illegal label name.
// Fix: the empty-value branch previously fell through and appended
// "/<label>/<value>" a second time; it now returns immediately.
absl::Status AppendLabelValue(std::string* url, std::string_view label,
                              std::string_view value) {
  if (!IsLegalPrometheusLabel(label)) {
    return absl::InvalidArgumentError("");
  }
  if (value.empty()) {
    // Pushgateway encodes an empty label value as "<label>@base64/=".
    absl::StrAppend(url, "/", label, "@base64/=");
    return absl::OkStatus();
  }
  for (char c : value) {
    if (!kValueUnreserved.Test(c)) {
      // Any reserved character forces base64 encoding of the whole value.
      absl::StrAppend(url, "/", label, "@base64/",
                      absl::WebSafeBase64Escape(value));
      return absl::OkStatus();
    }
  }
  absl::StrAppend(url, "/", label, "/", value);
  return absl::OkStatus();
}
// Sanitizes `in` into a legal Prometheus identifier: trims leading characters
// not in `first`, trims trailing characters that are neither in `first` nor
// digits, then replaces every remaining illegal character with '_'. May
// return an empty string if nothing legal remains.
std::string AsPrometheusString(std::string_view in, internal::AsciiSet first) {
  size_t skip = 0;
  while (skip < in.size() && !first.Test(in[skip])) ++skip;
  in.remove_prefix(skip);
  while (!in.empty()) {
    const char last = in[in.size() - 1];
    if (first.Test(last) || kDigit.Test(last)) break;
    in.remove_suffix(1);
  }
  std::string sanitized(in);
  for (char& ch : sanitized) {
    if (!first.Test(ch) && !kDigit.Test(ch)) ch = '_';
  }
  return sanitized;
}
// Visitor that renders one exposition line, "<name><suffix>{labels} <value>"
// (labels omitted when empty), for numeric metric payloads. String and
// monostate payloads render no line (empty string).
struct PrometheusValueLine {
  const std::string& metric_name;
  const char* suffix;     // e.g. " ", "_max ", "_count " (includes the space)
  const std::string& label_str;

  // Shared formatting for both numeric payload types.
  template <typename T>
  std::string Render(T x) const {
    if (label_str.empty()) {
      return absl::StrCat(metric_name, suffix, x);
    }
    return absl::StrCat(metric_name, suffix, "{", label_str, "} ", x);
  }
  std::string operator()(int64_t x) { return Render(x); }
  std::string operator()(double x) { return Render(x); }
  std::string operator()(const std::string& x) { return {}; }
  std::string operator()(std::monostate) { return {}; }
};
}
// Builds a PUT request for a Prometheus Pushgateway:
//   <host>/metrics/job/<job>[/instance/<instance>][/<label>/<value>...]
// Fix: the scheme-check string literals were truncated at "//" in this copy
// of the file ("http: / "https:); restored to the evident intent of
// validating that the host carries an explicit http(s) scheme.
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
    const PushGatewayConfig& config) {
  if (config.job.empty()) {
    return absl::InvalidArgumentError("PushGatewayConfig bad job");
  }
  if (!absl::StartsWith(config.host, "http://") &&
      !absl::StartsWith(config.host, "https://")) {
    return absl::InvalidArgumentError("PushGatewayConfig bad host");
  }
  std::string url = config.host;
  if (!absl::EndsWith(url, "/")) {
    absl::StrAppend(&url, "/metrics");
  } else {
    absl::StrAppend(&url, "metrics");
  }
  TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, "job", config.job));
  if (!config.instance.empty()) {
    TENSORSTORE_RETURN_IF_ERROR(
        AppendLabelValue(&url, "instance", config.instance));
  }
  // "job" and "instance" are positional path components above, so they are
  // forbidden as additional labels.
  for (const auto& [k, v] : config.additional_labels) {
    if (absl::EqualsIgnoreCase("job", k) ||
        absl::EqualsIgnoreCase("instance", k)) {
      return absl::InvalidArgumentError(
          "PushGatewayConfig additional_labels cannot contain job or instance");
    }
    TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, k, v));
  }
  return internal_http::HttpRequestBuilder("PUT", std::move(url))
      .BuildRequest();
}
// Renders one collected metric in the Prometheus text exposition format,
// emitting each rendered line through `handle_line`. Metrics whose names
// cannot be sanitized to a legal identifier are dropped entirely.
// Fix: removed a dead local `struct Histogram` declaration that was never
// used inside the histogram loop.
void PrometheusExpositionFormat(
    const CollectedMetric& metric,
    absl::FunctionRef<void(std::string)> handle_line) {
  std::string metric_name =
      AsPrometheusString(metric.metric_name, kMetricFirst);
  if (metric_name.empty()) return;
  // Sanitize the field names once; they become the label keys on every line.
  std::vector<std::string> prometheus_fields;
  prometheus_fields.reserve(metric.field_names.size());
  for (size_t i = 0; i < metric.field_names.size(); ++i) {
    prometheus_fields.push_back(
        AsPrometheusString(metric.field_names[i], kLabelFirst));
  }
  // Builds the `k1="v1", k2="v2"` label body for one value/histogram entry.
  auto build_label_str = [&](auto& v) -> std::string {
    assert(metric.field_names.size() == v.fields.size());
    if (v.fields.empty()) return {};
    std::string label_str;
    for (size_t i = 0; i < metric.field_names.size(); ++i) {
      absl::StrAppend(&label_str, i == 0 ? "" : ", ", prometheus_fields[i],
                      "=\"", absl::CEscape(v.fields[i]), "\"");
    }
    return label_str;
  };
  if (!metric.values.empty()) {
    std::string line;
    for (const auto& v : metric.values) {
      std::string label_str = build_label_str(v);
      // Scalar value plus (when numeric) its running maximum.
      line =
          std::visit(PrometheusValueLine{metric_name, " ", label_str}, v.value);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = std::visit(PrometheusValueLine{metric_name, "_max ", label_str},
                        v.max_value);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
    }
  }
  if (!metric.histograms.empty()) {
    std::string line;
    for (const auto& v : metric.histograms) {
      std::string label_str = build_label_str(v);
      line = PrometheusValueLine{metric_name, "_mean ", label_str}(v.mean);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_count ", label_str}(v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_variance ",
                                 label_str}(v.sum_of_squared_deviation);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_sum ",
                                 label_str}(v.mean * v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      // Trailing all-zero buckets are elided; a final "+Inf" bucket carrying
      // the total count is always emitted.
      size_t end = v.buckets.size();
      while (end > 0 && v.buckets[end - 1] == 0) --end;
      for (size_t i = 0; i < end; i++) {
        std::string bucket_labels = absl::StrCat(
            label_str, label_str.empty() ? "" : ", ", "le=\"", i, "\"");
        line = PrometheusValueLine{metric_name, "_bucket ",
                                   bucket_labels}(v.buckets[i]);
        if (!line.empty()) {
          handle_line(std::move(line));
        }
      }
      std::string bucket_labels =
          absl::StrCat(label_str, label_str.empty() ? "" : ", ", "le=\"+Inf\"");
      line =
          PrometheusValueLine{metric_name, "_bucket ", bucket_labels}(v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
    }
  }
}
}
} | #include "tensorstore/internal/metrics/prometheus.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
namespace {
using ::tensorstore::internal_metrics::BuildPrometheusPushRequest;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::PrometheusExpositionFormat;
using ::tensorstore::internal_metrics::PushGatewayConfig;
// Verifies the Pushgateway URL assembled from a PushGatewayConfig.
// NOTE(review): the string literals below appear truncated at "//" (likely
// lost during extraction); the original host/expected-URL literals should be
// restored — confirm against upstream.
TEST(PrometheusTest, BuildPrometheusPushRequest) {
  auto request = BuildPrometheusPushRequest(
      PushGatewayConfig{"http:
  EXPECT_TRUE(request.has_value());
  EXPECT_EQ("http:
            request->url);
}
// Verifies the rendered exposition lines for a metric with one scalar value
// and one histogram.
TEST(PrometheusTest, PrometheusExpositionFormat) {
  auto format_lines = [](const CollectedMetric& metric) {
    std::vector<std::string> lines;
    PrometheusExpositionFormat(
        metric, [&](std::string line) { lines.push_back(std::move(line)); });
    return lines;
  };
  CollectedMetric metric;
  metric.metric_name = "metric_name";
  metric.field_names.push_back("field_name");
  metric.metadata.description = "description";
  metric.tag = "tag";
  // No values and no histograms: nothing is emitted.
  EXPECT_THAT(format_lines(metric), ::testing::IsEmpty());
  metric.histograms.push_back(CollectedMetric::Histogram{});
  auto& h = metric.histograms.back();
  h.fields.push_back("hh");
  h.count = 1;
  h.mean = 1;
  h.sum_of_squared_deviation = 1;
  h.buckets.push_back(0);
  h.buckets.push_back(1);
  metric.values.push_back(CollectedMetric::Value{});
  auto& v = metric.values.back();
  v.fields.push_back("vv");
  v.value = int64_t{1};
  v.max_value = int64_t{2};
  EXPECT_THAT(format_lines(metric),
              ::testing::ElementsAre(
                  "metric_name {field_name=\"vv\"} 1",
                  "metric_name_max {field_name=\"vv\"} 2",
                  "metric_name_mean {field_name=\"hh\"} 1",
                  "metric_name_count {field_name=\"hh\"} 1",
                  "metric_name_variance {field_name=\"hh\"} 1",
                  "metric_name_sum {field_name=\"hh\"} 1",
                  "metric_name_bucket {field_name=\"hh\", le=\"0\"} 0",
                  "metric_name_bucket {field_name=\"hh\", le=\"1\"} 1",
                  "metric_name_bucket {field_name=\"hh\", le=\"+Inf\"} 1"));
}
} | void PrometheusExpositionFormat(
const CollectedMetric& metric,
absl::FunctionRef<void(std::string)> handle_line) {
std::string metric_name =
AsPrometheusString(metric.metric_name, kMetricFirst);
if (metric_name.empty()) return;
std::vector<std::string> prometheus_fields;
prometheus_fields.reserve(metric.field_names.size());
for (size_t i = 0; i < metric.field_names.size(); ++i) {
prometheus_fields.push_back(
AsPrometheusString(metric.field_names[i], kLabelFirst));
}
auto build_label_str = [&](auto& v) -> std::string {
assert(metric.field_names.size() == v.fields.size());
if (v.fields.empty()) return {};
std::string label_str;
for (size_t i = 0; i < metric.field_names.size(); ++i) {
absl::StrAppend(&label_str, i == 0 ? "" : ", ", prometheus_fields[i],
"=\"", absl::CEscape(v.fields[i]), "\"");
}
return label_str;
};
if (!metric.values.empty()) {
std::string line;
for (const auto& v : metric.values) {
std::string label_str = build_label_str(v);
line =
std::visit(PrometheusValueLine{metric_name, " ", label_str}, v.value);
if (!line.empty()) {
handle_line(std::move(line));
}
line = std::visit(PrometheusValueLine{metric_name, "_max ", label_str},
v.max_value);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
if (!metric.histograms.empty()) {
std::string line;
for (const auto& v : metric.histograms) {
std::string label_str = build_label_str(v);
struct Histogram {
std::vector<int64_t> buckets;
};
line = PrometheusValueLine{metric_name, "_mean ", label_str}(v.mean);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_count ", label_str}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_variance ",
label_str}(v.sum_of_squared_deviation);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_sum ",
label_str}(v.mean * v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) --end;
for (size_t i = 0; i < end; i++) {
std::string bucket_labels = absl::StrCat(
label_str, label_str.empty() ? "" : ", ", "le=\"", i, "\"");
line = PrometheusValueLine{metric_name, "_bucket ",
bucket_labels}(v.buckets[i]);
if (!line.empty()) {
handle_line(std::move(line));
}
}
std::string bucket_labels =
absl::StrCat(label_str, label_str.empty() ? "" : ", ", "le=\"+Inf\"");
line =
PrometheusValueLine{metric_name, "_bucket ", bucket_labels}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
} | TEST(PrometheusTest, PrometheusExpositionFormat) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::string> lines;
PrometheusExpositionFormat(
metric, [&](std::string line) { lines.push_back(std::move(line)); });
return lines;
};
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
EXPECT_THAT(format_lines(metric), ::testing::IsEmpty());
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
h.mean = 1;
h.sum_of_squared_deviation = 1;
h.buckets.push_back(0);
h.buckets.push_back(1);
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
v.max_value = int64_t{2};
EXPECT_THAT(format_lines(metric),
::testing::ElementsAre(
"metric_name {field_name=\"vv\"} 1",
"metric_name_max {field_name=\"vv\"} 2",
"metric_name_mean {field_name=\"hh\"} 1",
"metric_name_count {field_name=\"hh\"} 1",
"metric_name_variance {field_name=\"hh\"} 1",
"metric_name_sum {field_name=\"hh\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"0\"} 0",
"metric_name_bucket {field_name=\"hh\", le=\"1\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"+Inf\"} 1"));
} |
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstdint>
#include <cstring>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TypeSwitch.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
namespace mlrt {
namespace {
// True when an attribute can be stored inline in the kernel's 4-byte
// attribute slot instead of being referenced by id: scalar integers, floats,
// and flat symbol refs whose encoded payload fits in a uint32_t.
bool CanBeInlined(mlir::Attribute attr, absl::string_view data) {
  return mlir::isa<mlir::IntegerAttr, mlir::FloatAttr, mlir::FlatSymbolRefAttr>(
             attr) &&
         data.size() <= sizeof(uint32_t);
}
// Serializes a scalar's raw object bytes (native byte order) into a string.
template <typename T>
std::string EncodeIntegerOrFloat(T attr) {
  const char* bytes = reinterpret_cast<const char*>(&attr);
  return std::string(bytes, sizeof(attr));
}
// Encodes an array attribute of homogeneous integers as a bc::Vector<T>.
// Returns nullopt if any element is not an IntegerAttr, the element types are
// mixed, or the bit width does not exactly match T. Because the width equals
// sizeof(T)*8, getZExtValue() preserves the full bit pattern (signedness is
// irrelevant at matching width).
template <typename T>
std::optional<std::string> EncodeListOfInteger(mlir::ArrayAttr array) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
  mlir::Type type;
  for (int i = 0; i < array.size(); ++i) {
    if (auto integer_attr = mlir::dyn_cast<mlir::IntegerAttr>(array[i])) {
      // All elements must share a single integer type.
      if (type && integer_attr.getType() != type) return std::nullopt;
      type = integer_attr.getType();
      llvm::APInt value = integer_attr.getValue();
      if (value.getBitWidth() != sizeof(T) * 8) return std::nullopt;
      ctor.ConstructAt(i, value.getZExtValue());
    } else {
      return std::nullopt;
    }
  }
  return std::string(buffer.data(), buffer.size());
}
// Encodes an array of flat symbol references as a bc::Vector<uint32_t> of
// function ids resolved through the module context. Returns nullopt if any
// element is not a FlatSymbolRefAttr.
std::optional<std::string> EncodeListOfSymbolRef(
    const ModuleEmitterContext& module_context, mlir::ArrayAttr array) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto ctor = bc::New<bc::Vector<uint32_t>>(&allocator, array.size());
  for (int i = 0; i < array.size(); ++i) {
    if (auto symbol_ref = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(array[i])) {
      ctor.ConstructAt(i, module_context.GetFunctionId(symbol_ref.getValue()));
    } else {
      return std::nullopt;
    }
  }
  return std::string(buffer.data(), buffer.size());
}
// Encodes a dense array of trivially-copyable elements as a bc::Vector<T> by
// bulk-copying the raw bytes. Always succeeds (optional for interface
// symmetry with the other encoders).
template <typename T>
std::optional<std::string> EncodeDenseArray(llvm::ArrayRef<T> array) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
  if (!array.empty()) {
    ctor.Place(reinterpret_cast<const char*>(array.data()),
               array.size() * sizeof(T));
  }
  return std::string(buffer.data(), buffer.size());
}
// Encodes a dense bool array as a bc::Vector<uint8_t>, widening each bool to
// one byte before placing it into the bytecode buffer (llvm::ArrayRef<bool>
// storage is not guaranteed to be byte-per-element compatible).
std::optional<std::string> EncodeDenseBoolArray(llvm::ArrayRef<bool> array) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto ctor = bc::New<bc::Vector<uint8_t>>(&allocator, array.size());
  if (!array.empty()) {
    std::vector<uint8_t> bytes;
    bytes.reserve(array.size());
    for (bool b : array) {
      bytes.push_back(b ? 1 : 0);
    }
    ctor.Place(reinterpret_cast<const char*>(bytes.data()), bytes.size());
  }
  return std::string(buffer.data(), buffer.size());
}
// Encodes an array of string attributes as a bc::Vector<bc::String>.
// Returns nullopt if any element is not a StringAttr.
std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size());
  for (int i = 0; i < array.size(); ++i) {
    if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) {
      ctor.ConstructAt(i, string_attr.getValue().str());
    } else {
      return std::nullopt;
    }
  }
  return std::string(buffer.data(), buffer.size());
}
// Per-function state for register allocation while emitting bytecode kernels.
// Registers are recycled: when a value's last use is emitted, its register id
// goes onto a free list and is reused before the register file grows.
struct FunctionEmitterContext {
  explicit FunctionEmitterContext(const ModuleEmitterContext* module_context)
      : module_context(*module_context) {}
  const ModuleEmitterContext& module_context;
  // Per-SSA-value bookkeeping: remaining use count and the assigned register
  // id (-1 until the defining kernel has been emitted).
  struct RegInfo {
    int num_uses = 0;
    int id = -1;
  };
  int next_reg_id = 0;
  llvm::DenseMap<mlir::Value, RegInfo> register_table;
  std::vector<int> free_regs;
  // Reuses a freed register when available; otherwise grows the register file.
  int AssignRegId() {
    if (free_regs.empty()) {
      return next_reg_id++;
    }
    int id = free_regs.back();
    free_regs.pop_back();
    return id;
  }
  // Returns a register to the free list once its value's last use is emitted.
  void FreeRegId(int id) { free_regs.push_back(id); }
};
// Emits one bytecode kernel for `op`: assigns registers to its results,
// resolves operand registers (freeing a register at the operand's last use),
// encodes attributes (inlined when small enough), and selects the kernel
// code. For a func.return terminator, the operand registers and last-use
// bits are also exported via `function_output_regs` /
// `function_output_last_uses`.
void EmitKernel(FunctionEmitterContext& function_context,
                bc::Kernel::Constructor& constructor, mlir::Operation& op,
                std::vector<uint32_t>& function_output_regs,
                std::vector<uint8_t>& function_output_last_uses) {
  std::vector<uint32_t> results;
  results.reserve(op.getNumResults());
  for (auto result : op.getResults()) {
    auto iter = function_context.register_table.find(result);
    CHECK(iter != function_context.register_table.end());
    // A result must not already have a register; it is defined here.
    CHECK_EQ(iter->second.id, -1);
    iter->second.id = function_context.AssignRegId();
    results.push_back(iter->second.id);
  }
  constructor.construct_results(results.size())
      .Assign(results.begin(), results.end());
  std::vector<uint32_t> arguments;
  std::vector<uint8_t> last_uses;
  arguments.reserve(op.getNumOperands());
  last_uses.reserve(op.getNumOperands());
  for (auto operand : op.getOperands()) {
    auto iter = function_context.register_table.find(operand);
    CHECK(iter != function_context.register_table.end());
    int id = iter->second.id;
    // An operand must have been defined (and given a register) earlier.
    CHECK_NE(id, -1);
    last_uses.push_back(0);
    // When this is the operand's final use, free its register and set the
    // last-use bit for the interpreter.
    if (--iter->second.num_uses == 0) {
      function_context.FreeRegId(id);
      last_uses.back() = 1;
    }
    arguments.push_back(id);
  }
  constructor.construct_arguments(arguments.size())
      .Assign(arguments.begin(), arguments.end());
  constructor.construct_last_uses(last_uses.size())
      .Assign(last_uses.begin(), last_uses.end());
  std::vector<uint32_t> attributes;
  attributes.reserve(op.getAttrs().size());
  for (auto attr : op.getAttrs()) {
    int attr_id =
        function_context.module_context.GetAttributeId(attr.getValue());
    absl::string_view attr_data =
        function_context.module_context.attributes().at(attr_id);
    // Small attribute payloads are stored inline in the kernel's attribute
    // slot instead of as an index into the attribute table.
    if (CanBeInlined(attr.getValue(), attr_data)) {
      uint32_t data = 0;
      std::memcpy(&data, attr_data.data(), attr_data.size());
      attributes.push_back(data);
    } else {
      attributes.push_back(attr_id);
    }
  }
  constructor.construct_attributes(attributes.size())
      .Assign(attributes.begin(), attributes.end());
  // func.return and func.call map to the built-in "return"/"call" kernels;
  // any other op dispatches on its full MLIR op name.
  if (llvm::isa<mlir::func::ReturnOp>(&op)) {
    constructor.set_code(function_context.module_context.GetKernelId("return"));
    function_output_regs = std::move(arguments);
    function_output_last_uses = std::move(last_uses);
  } else if (llvm::isa<mlir::func::CallOp>(&op)) {
    constructor.set_code(function_context.module_context.GetKernelId("call"));
  } else {
    llvm::StringRef op_name = op.getName().getStringRef();
    constructor.set_code(function_context.module_context.GetKernelId(op_name));
  }
}
// Lowers a single-block MLIR region into one bytecode function: assigns
// input registers to the block arguments, precomputes use counts for every
// op result, then emits one kernel per operation in block order. The
// function's output registers/last-use bits come from the return op, which
// EmitKernel writes into `output_regs` / `output_last_uses`.
void EmitFunction(const ModuleEmitterContext& module_context,
                  bc::Function::Constructor& constructor, llvm::StringRef name,
                  mlir::Region& region) {
  FunctionEmitterContext function_context(&module_context);
  constructor.construct_name(name.str());
  DCHECK(llvm::hasSingleElement(region)) << "should have a single block";
  auto& block = region.front();
  auto& register_table = function_context.register_table;
  std::vector<uint32_t> input_regs;
  input_regs.reserve(block.getNumArguments());
  // Block arguments get the first registers, with their total use counts.
  for (auto arg : block.getArguments()) {
    int id = function_context.AssignRegId();
    input_regs.push_back(id);
    register_table[arg] = {static_cast<int>(std::distance(arg.getUses().begin(),
                                                          arg.getUses().end())),
                          id};
  }
  constructor.construct_input_regs(input_regs);
  // Pre-populate use counts for all op results; register ids are assigned
  // later, when each defining op is emitted.
  for (auto& op : block) {
    for (auto result : op.getResults()) {
      register_table[result] = {static_cast<int>(
          std::distance(result.getUses().begin(), result.getUses().end()))};
    }
  }
  auto kernels_constructor =
      constructor.construct_kernels(block.getOperations().size());
  std::vector<uint32_t> output_regs;
  std::vector<uint8_t> output_last_uses;
  for (const auto& iter : llvm::enumerate(block.getOperations())) {
    int i = iter.index();
    mlir::Operation& op = iter.value();
    auto kernel_ctor = kernels_constructor.ConstructAt(i);
    EmitKernel(function_context, kernel_ctor, op, output_regs,
               output_last_uses);
  }
  constructor.set_num_regs(function_context.next_reg_id);
  constructor.construct_output_regs(output_regs);
  constructor.construct_output_last_uses(output_last_uses);
}
// Emits the whole module: first registers every function, then collects all
// kernel names and attributes (so the tables are complete before any
// function body is emitted), and finally emits each function. Fails if a
// function has more than one block or its return op carries results.
absl::Status EmitExecutable(ModuleEmitterContext& module_context,
                            bc::Executable::Constructor& constructor,
                            mlir::ModuleOp module) {
  module.walk(
      [&](mlir::func::FuncOp func) { module_context.AddFunction(func); });
  auto functions = module_context.functions();
  for (auto func : functions) {
    if (!llvm::hasSingleElement(func.getRegion())) {
      return absl::InvalidArgumentError("function should have a single block.");
    }
    auto& block = func.getRegion().front();
    for (auto& op : block) {
      if (llvm::isa<mlir::func::CallOp>(&op)) {
        module_context.AddKernelName("call");
      } else if (llvm::isa<mlir::func::ReturnOp>(&op)) {
        // Returned values are passed through output registers, not results.
        if (op.getNumResults() != 0) {
          return absl::InvalidArgumentError(
              "Block terminator must be a return op.");
        }
        module_context.AddKernelName("return");
      } else {
        module_context.AddKernelName(op.getName().getStringRef().str());
      }
      for (auto attr : op.getAttrs()) {
        if (auto status = module_context.AddAttribute(&op, attr.getValue());
            !status.ok()) {
          return status;
        }
      }
    }
  }
  constructor.construct_kernel_names(module_context.kernels().size())
      .Assign(module_context.kernels().begin(), module_context.kernels().end());
  auto functions_constructor =
      constructor.construct_functions(functions.size());
  for (int i = 0; i < functions.size(); ++i) {
    auto func = functions[i];
    auto function_ctor = functions_constructor.ConstructAt(i);
    EmitFunction(module_context, function_ctor, func.getSymName(),
                 func.getRegion());
  }
  constructor.construct_attributes(module_context.attributes().size())
      .Assign(module_context.attributes().begin(),
              module_context.attributes().end());
  return absl::OkStatus();
}
}
// Encodes `attr` using the encoder registered for the op's dialect (falling
// back to the default encoding) and records the encoded bytes under a
// deduplicated attribute id.
absl::Status ModuleEmitterContext::AddAttribute(mlir::Operation* op,
                                                mlir::Attribute attr) {
  const auto* encoder =
      attribute_encoder_registry_.Get(op->getName().getDialectNamespace());
  absl::StatusOr<std::string> encoded =
      encoder != nullptr ? (*encoder)(*this, attr) : DefaultEncodeAttribute(attr);
  if (!encoded.ok()) return std::move(encoded).status();
  int id = AddData(std::move(*encoded), attributes_, attribute_data_id_map_);
  attribute_id_map_[attr] = id;
  return absl::OkStatus();
}
// Registers `func` and returns its id, which is its index in the function
// table. Each symbol name may be added only once.
int ModuleEmitterContext::AddFunction(mlir::func::FuncOp func) {
  const int id = static_cast<int>(functions_.size());
  functions_.push_back(func);
  DCHECK(!function_name_id_map_.contains(func.getSymName()));
  function_name_id_map_[func.getSymName()] = id;
  return id;
}
// Encodes the attribute kinds natively understood by the bytecode format:
// strings, 1/32/64-bit integers, f32 floats, homogeneous arrays (of i32,
// i64, string, or symbol refs), dense i32/i64/bool arrays, and flat symbol
// references (encoded as the referenced function's id). Returns std::nullopt
// for any other attribute kind.
std::optional<std::string> EncodeSimpleAttribute(
    const ModuleEmitterContext& module_context, mlir::Attribute attr) {
  return llvm::TypeSwitch<mlir::Attribute, std::optional<std::string>>(attr)
      .Case<mlir::StringAttr>(
          [](const auto& str_attr) { return str_attr.str(); })
      .Case<mlir::IntegerAttr>(
          [](const auto& integer_attr) -> std::optional<std::string> {
            // Only 1-, 32-, and 64-bit integers have an encoding.
            switch (llvm::APInt value = integer_attr.getValue();
                    value.getBitWidth()) {
              case 1:
                return EncodeIntegerOrFloat<uint8_t>(value.getZExtValue());
              case 32:
                return EncodeIntegerOrFloat<uint32_t>(value.getZExtValue());
              case 64:
                return EncodeIntegerOrFloat<uint64_t>(value.getZExtValue());
              default:
                return std::nullopt;
            }
          })
      .Case<mlir::FloatAttr>(
          [](const auto& float_attr) -> std::optional<std::string> {
            llvm::APFloat value = float_attr.getValue();
            // Only f32 floats have an encoding.
            if (float_attr.getType().isF32()) {
              return EncodeIntegerOrFloat<float>(value.convertToFloat());
            }
            return std::nullopt;
          })
      .Case<mlir::ArrayAttr>([&](const auto& array_attr)
                                 -> std::optional<std::string> {
        // Try each homogeneous-list encoding in turn; the first that accepts
        // every element wins.
        if (auto encoded_list_i32 = EncodeListOfInteger<uint32_t>(array_attr)) {
          return std::move(*encoded_list_i32);
        } else if (auto encoded_list_i64 =
                       EncodeListOfInteger<uint64_t>(array_attr)) {
          return std::move(*encoded_list_i64);
        } else if (auto encoded_list_string = EncodeListOfString(array_attr)) {
          return std::move(*encoded_list_string);
        } else if (auto encoded_list_symbol_ref =
                       EncodeListOfSymbolRef(module_context, array_attr)) {
          return std::move(*encoded_list_symbol_ref);
        } else {
          return std::nullopt;
        }
      })
      .Case<mlir::DenseI32ArrayAttr>(
          [](const auto& dense_array_i32) -> std::optional<std::string> {
            return EncodeDenseArray<int32_t>(dense_array_i32);
          })
      .Case<mlir::DenseI64ArrayAttr>(
          [](const auto& dense_array_i64) -> std::optional<std::string> {
            return EncodeDenseArray<int64_t>(dense_array_i64);
          })
      .Case<mlir::DenseBoolArrayAttr>(
          [](const auto& dense_array_bool) -> std::optional<std::string> {
            return EncodeDenseBoolArray(dense_array_bool.asArrayRef());
          })
      .Case<mlir::FlatSymbolRefAttr>([&](const auto& symbol_ref) {
        // A symbol ref is encoded as the id of the function it names.
        return EncodeIntegerOrFloat<uint32_t>(
            module_context.GetFunctionId(symbol_ref.getValue()));
      })
      .Default([](const auto& attr) { return std::nullopt; });
}
// Fallback attribute encoder: handles the simple attribute kinds understood
// by EncodeSimpleAttribute and reports an InvalidArgument error, including
// the printed form of the attribute, for anything else.
absl::StatusOr<std::string> ModuleEmitterContext::DefaultEncodeAttribute(
    mlir::Attribute attr) {
  if (auto encoded = EncodeSimpleAttribute(*this, attr)) {
    return std::move(*encoded);
  }
  std::string attr_str;
  llvm::raw_string_ostream os(attr_str);
  attr.print(os);
  return absl::InvalidArgumentError(
      absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
// Translates an MLIR module into an mlrt bytecode buffer. Returns the
// emitted buffer on success, or the first error encountered while emitting.
absl::StatusOr<bc::Buffer> EmitExecutable(
    const AttributeEncoderRegistry& attribute_encoder_registry,
    mlir::ModuleOp module) {
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  ModuleEmitterContext module_context(&attribute_encoder_registry);
  auto executable_ctor = bc::New<bc::Executable>(&allocator);
  absl::Status status = EmitExecutable(module_context, executable_ctor, module);
  if (!status.ok()) {
    return status;
  }
  // Drop any slack left over from allocator growth before handing back.
  buffer.shrink_to_fit();
  return buffer;
}
} | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/attribute_span.h"
#include "tsl/platform/resource_loader.h"
#include "tsl/platform/status_matchers.h"
namespace mlrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::FloatEq;
using ::testing::IsEmpty;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
// End-to-end check: parses testdata/basic.mlir, emits bytecode, and verifies
// the kernel-name table plus register assignment, reuse, and last-use bits
// of the first function.
TEST(MlirToByteCodeTest, Basic) {
  constexpr char kBasicMlir[] =
      "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir";
  mlir::DialectRegistry registry;
  registry.insert<mlir::func::FuncDialect>();
  mlir::MLIRContext mlir_context(registry);
  mlir_context.allowUnregisteredDialects();
  auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
      tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context);
  AttributeEncoderRegistry attribute_encoder_registry;
  bc::Buffer buffer =
      EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
  bc::Executable executable(buffer.data());
  auto kernel_names = executable.kernel_names();
  EXPECT_THAT(kernel_names,
              ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32",
                                "call", "return"}));
  auto functions = executable.functions();
  ASSERT_GE(functions.size(), 1);
  auto function = functions[0];
  EXPECT_EQ(function.name().str(), "add_i32_10");
  EXPECT_EQ(function.num_regs(), 5);
  EXPECT_THAT(function.input_regs(), ElementsAreArray({0}));
  EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2}));
  EXPECT_THAT(function.output_last_uses(),
              ElementsAreArray({true, false, true}));
  auto kernels = function.kernels();
  ASSERT_EQ(kernels.size(), 11);
  EXPECT_EQ(kernels[0].code(), 0);
  EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0}));
  EXPECT_THAT(kernels[0].results(), ElementsAreArray({1}));
  EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0}));
  // Kernels 1..8 alternate between codes 0 and 1, reading the previous
  // result register and reusing registers 1 and 2 as they are freed.
  for (int i = 1; i < 9; i++) {
    EXPECT_EQ(kernels[i].code(), i % 2);
    EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0}));
    EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1}));
    EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0}));
  }
  // Kernel 9 is the "call", kernel 10 the "return".
  EXPECT_EQ(kernels[9].code(), 2);
  EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1}));
  EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true}));
  EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4}));
  EXPECT_EQ(kernels[10].code(), 3);
  EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2}));
  EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true}));
  EXPECT_TRUE(kernels[10].results().empty());
}
// Reinterprets the leading sizeof(T) bytes of `data` as a T in the host
// representation. Fails if `data` is too short to contain one T.
template <typename T>
absl::StatusOr<T> DecodeAttribute(absl::string_view data) {
  if (data.size() < sizeof(T)) {
    return absl::InvalidArgumentError("Invalid data size for attribute.");
  }
  T decoded;
  std::memcpy(&decoded, data.data(), sizeof(T));
  return decoded;
}
// Emits testdata/basic_attributes.mlir and verifies every supported
// attribute encoding (strings, integers, floats, lists, dense arrays,
// symbol refs), both through the attribute table and through each kernel's
// attribute span.
TEST(MlirToByteCodeTest, BasicAttributes) {
  constexpr char kBasicAttributesMlir[] =
      "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
      "basic_attributes.mlir";
  mlir::DialectRegistry registry;
  registry.insert<mlir::func::FuncDialect>();
  mlir::MLIRContext mlir_context(registry);
  mlir_context.allowUnregisteredDialects();
  auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
      tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context);
  AttributeEncoderRegistry attribute_encoder_registry;
  bc::Buffer buffer =
      EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
  bc::Executable executable(buffer.data());
  auto attributes = executable.attributes();
  ASSERT_EQ(attributes.size(), 15);
  // Walk the attribute table in emission order.
  auto attr_iter = attributes.begin();
  EXPECT_EQ(*attr_iter, "test string");
  ++attr_iter;
  EXPECT_EQ(*attr_iter, "ts");
  ++attr_iter;
  EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100));
  ++attr_iter;
  EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200));
  ++attr_iter;
  EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0)));
  ++attr_iter;
  EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0));
  ++attr_iter;
  bc::Vector<int64_t> list_of_i64((*attr_iter).data());
  EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4}));
  ++attr_iter;
  bc::Vector<int32_t> list_of_i32((*attr_iter).data());
  EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3}));
  ++attr_iter;
  bc::Vector<bc::String> list_of_str((*attr_iter).data());
  EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"}));
  ++attr_iter;
  // Symbol refs encode as the referenced function's id.
  EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1));
  EXPECT_EQ(executable.functions()[1].name().Get(), "callee");
  ++attr_iter;
  bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data());
  EXPECT_EQ(executable.functions()[2].name().Get(), "callee0");
  EXPECT_EQ(executable.functions()[3].name().Get(), "callee1");
  EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3}));
  ++attr_iter;
  bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data());
  EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2}));
  ++attr_iter;
  bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data());
  EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2}));
  ++attr_iter;
  bc::Vector<int32_t> empty_dense_array((*attr_iter).data());
  EXPECT_TRUE(empty_dense_array.empty());
  ++attr_iter;
  // Dense bool arrays are widened to one byte per element.
  bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data());
  EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false}));
  // The same attributes, read back through each kernel's attribute span.
  auto kernels = executable.functions()[0].kernels();
  ASSERT_EQ(kernels.size(), 16);
  auto kernel_iter = kernels.begin();
  auto attribute_span = [&](auto kernel_iter) {
    return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes);
  };
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(),
            "test string");
  ++kernel_iter;
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts");
  ++kernel_iter;
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100);
  ++kernel_iter;
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200);
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0));
  ++kernel_iter;
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false);
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
              ElementsAreArray({0, 1, 2, 3, 4}));
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
              ElementsAreArray({0, 1, 2, 3}));
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0),
              ElementsAreArray({"string 0", "string 1"}));
  ++kernel_iter;
  EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1);
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
              ElementsAreArray({2, 3}));
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
              ElementsAreArray({0, 1, 2}));
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
              ElementsAreArray({0, 1, 2}));
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
              IsEmpty());
  ++kernel_iter;
  EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0),
              ElementsAreArray({true, false}));
}
// Verifies that an attribute with no encoding (a unit attribute) makes
// emission fail with InvalidArgument and the expected message.
TEST(MlirToByteCodeTest, UnsupportedAttributes) {
  constexpr char kUnsupportedAttributesMlir[] =
      "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
      "unsupported_attributes.mlir";
  mlir::DialectRegistry registry;
  registry.insert<mlir::func::FuncDialect>();
  mlir::MLIRContext mlir_context(registry);
  mlir_context.allowUnregisteredDialects();
  auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
      tsl::GetDataDependencyFilepath(kUnsupportedAttributesMlir),
      &mlir_context);
  AttributeEncoderRegistry attribute_encoder_registry;
  EXPECT_THAT(EmitExecutable(attribute_encoder_registry, mlir_module.get()),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "Try to encode unsupported attribute: unit"));
}
// Test-only bytecode type used to exercise custom attribute encoders: a
// dense tensor stored as a shape vector plus a flat uint32 data vector,
// with the accessor/constructor pair the bytecode framework expects.
class CustomDense {
 public:
  struct StorageType {
    using Self = StorageType;
    DEFINE_BYTECODE_FIELD(bc::Vector<int64_t>, shape);
    DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, data);
  };
  class Constructor {
   public:
    Constructor(bc::Allocator* allocator, bc::BcAddr_t address)
        : allocator_(allocator), address_(address) {}
    template <typename... Args>
    auto construct_shape(Args&&... args) {
      return StorageType::construct_shape(allocator_, address_,
                                          std::forward<Args>(args)...);
    }
    template <typename... Args>
    auto construct_data(Args&&... args) {
      return StorageType::construct_data(allocator_, address_,
                                         std::forward<Args>(args)...);
    }
    bc::BcAddr_t address() const { return address_; }
   private:
    bc::Allocator* allocator_;
    bc::BcAddr_t address_;
  };
  using NonTrivialConstructorType = Constructor;
  // Wraps (does not own) already-serialized bytes at `p`.
  explicit CustomDense(const char* p) : p_(p) {}
  bc::Vector<int64_t> shape() const { return StorageType::read_shape(p_); }
  bc::Vector<uint32_t> data() const { return StorageType::read_data(p_); }
 private:
  const char* p_ = nullptr;
};
// Custom encoder under test: serializes an i32 DenseIntElementsAttr into the
// CustomDense layout (shape then raw element bytes). Rejects non-integer and
// non-32-bit element types.
absl::StatusOr<std::string> EncodeCustomDense(const ModuleEmitterContext&,
                                              mlir::Attribute attr) {
  auto dense_int_attr = mlir::dyn_cast<mlir::DenseIntElementsAttr>(attr);
  if (!dense_int_attr)
    return absl::InvalidArgumentError(
        "The element of the custom dense attribute must be an integer.");
  if (mlir::cast<mlir::IntegerType>(dense_int_attr.getElementType())
          .getWidth() != 32) {
    return absl::InvalidArgumentError(
        "The element of the custom dense attribute must be an i32 integer.");
  }
  bc::Buffer buffer;
  bc::Allocator allocator(&buffer);
  auto custom_dense_ctor = bc::New<CustomDense>(&allocator);
  auto shaped_type = dense_int_attr.getType();
  std::vector<int64_t> shape(shaped_type.getShape().begin(),
                             shaped_type.getShape().end());
  custom_dense_ctor.construct_shape(shape);
  // Element payload is copied verbatim from the attribute's raw data.
  custom_dense_ctor.construct_data(shaped_type.getNumElements())
      .Place(dense_int_attr.getRawData().data(),
             dense_int_attr.getRawData().size());
  return std::string(buffer.data(), buffer.size());
}
// Registers EncodeCustomDense for the "test_custom" dialect and checks that
// each of the ten emitted attributes round-trips through the CustomDense
// reader with shape {1} and data {i}.
TEST(MlirToByteCodeTest, CustomDense) {
  constexpr char kCustomAttributesMlir[] =
      "tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
      "custom_attributes.mlir";
  mlir::DialectRegistry registry;
  registry.insert<mlir::func::FuncDialect>();
  mlir::MLIRContext mlir_context(registry);
  mlir_context.allowUnregisteredDialects();
  auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
      tsl::GetDataDependencyFilepath(kCustomAttributesMlir), &mlir_context);
  AttributeEncoderRegistry attribute_encoder_registry;
  attribute_encoder_registry.Register("test_custom", &EncodeCustomDense);
  bc::Buffer buffer =
      EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
  bc::Executable executable(buffer.data());
  auto attributes = executable.attributes();
  ASSERT_EQ(attributes.size(), 10);
  for (int i = 0; i < 10; ++i) {
    bc::String attr_data = attributes[i];
    CustomDense custom_dense(attr_data.data());
    EXPECT_THAT(custom_dense.shape(), ElementsAreArray({1}));
    EXPECT_THAT(custom_dense.data(), ElementsAreArray({i}));
  }
}
}
} | std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) {
ctor.ConstructAt(i, string_attr.getValue().str());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
} | TEST(MlirToByteCodeTest, BasicAttributes) {
constexpr char kBasicAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"basic_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 15);
auto attr_iter = attributes.begin();
EXPECT_EQ(*attr_iter, "test string");
++attr_iter;
EXPECT_EQ(*attr_iter, "ts");
++attr_iter;
EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100));
++attr_iter;
EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200));
++attr_iter;
EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0)));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0));
++attr_iter;
bc::Vector<int64_t> list_of_i64((*attr_iter).data());
EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4}));
++attr_iter;
bc::Vector<int32_t> list_of_i32((*attr_iter).data());
EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3}));
++attr_iter;
bc::Vector<bc::String> list_of_str((*attr_iter).data());
EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"}));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1));
EXPECT_EQ(executable.functions()[1].name().Get(), "callee");
++attr_iter;
bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data());
EXPECT_EQ(executable.functions()[2].name().Get(), "callee0");
EXPECT_EQ(executable.functions()[3].name().Get(), "callee1");
EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3}));
++attr_iter;
bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data());
EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data());
EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int32_t> empty_dense_array((*attr_iter).data());
EXPECT_TRUE(empty_dense_array.empty());
++attr_iter;
bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data());
EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false}));
auto kernels = executable.functions()[0].kernels();
ASSERT_EQ(kernels.size(), 16);
auto kernel_iter = kernels.begin();
auto attribute_span = [&](auto kernel_iter) {
return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes);
};
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(),
"test string");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100);
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2, 3, 4}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0),
ElementsAreArray({"string 0", "string 1"}));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
IsEmpty());
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0),
ElementsAreArray({true, false}));
} |
#include "quiche/spdy/core/spdy_protocol.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/spdy/core/http2_header_block.h"
#include "quiche/spdy/core/spdy_alt_svc_wire_format.h"
namespace spdy {
// The HTTP/2 client connection preface sent before any frames.
const char* const kHttp2ConnectionHeaderPrefix =
    "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
// Streams the numeric (wire) value of a known settings id.
std::ostream& operator<<(std::ostream& out, SpdyKnownSettingsId id) {
  out << static_cast<SpdySettingsId>(id);
  return out;
}
// Streams the serialized (wire) value of a frame type.
std::ostream& operator<<(std::ostream& out, SpdyFrameType frame_type) {
  out << SerializeFrameType(frame_type);
  return out;
}
// Clamps `priority` into the valid SPDY/3 range. Values above
// kV3LowestPriority are reported via QUICHE_BUG and clamped down; values
// below kV3HighestPriority cannot occur because SpdyPriority's minimum
// equals kV3HighestPriority (enforced by the static_assert).
SpdyPriority ClampSpdy3Priority(SpdyPriority priority) {
  static_assert(std::numeric_limits<SpdyPriority>::min() == kV3HighestPriority,
                "The value of given priority shouldn't be smaller than highest "
                "priority. Check this invariant explicitly.");
  if (priority > kV3LowestPriority) {
    QUICHE_BUG(spdy_bug_22_1)
        << "Invalid priority: " << static_cast<int>(priority);
    return kV3LowestPriority;
  }
  return priority;
}
// Clamps `weight` into the valid HTTP/2 stream weight range
// [kHttp2MinStreamWeight, kHttp2MaxStreamWeight], reporting out-of-range
// inputs via QUICHE_BUG.
int ClampHttp2Weight(int weight) {
  if (weight < kHttp2MinStreamWeight) {
    QUICHE_BUG(spdy_bug_22_2) << "Invalid weight: " << weight;
    return kHttp2MinStreamWeight;
  }
  if (weight > kHttp2MaxStreamWeight) {
    QUICHE_BUG(spdy_bug_22_3) << "Invalid weight: " << weight;
    return kHttp2MaxStreamWeight;
  }
  return weight;
}
// Maps a SPDY/3 priority (0 highest .. 7 lowest) onto the HTTP/2 weight
// range; lower (more urgent) priorities produce larger weights.
int Spdy3PriorityToHttp2Weight(SpdyPriority priority) {
  const SpdyPriority clamped = ClampSpdy3Priority(priority);
  constexpr float kSteps = 255.9f / 7.f;
  const float scaled = kSteps * (7.f - clamped);
  return static_cast<int>(scaled) + 1;
}
// Inverse of Spdy3PriorityToHttp2Weight: maps an HTTP/2 weight back onto
// the SPDY/3 priority scale (larger weights -> smaller priority values).
SpdyPriority Http2WeightToSpdy3Priority(int weight) {
  const int clamped = ClampHttp2Weight(weight);
  constexpr float kSteps = 255.9f / 7.f;
  return static_cast<SpdyPriority>(7.f - (clamped - 1) / kSteps);
}
// Returns true iff `frame_type_field` is a frame type this implementation
// defines (the RFC 9113 core frames plus the ALTSVC, PRIORITY_UPDATE, and
// ACCEPT_CH extension frames). The thirteen per-case `return true;` bodies
// of the original are collapsed into one fallthrough group; behavior is
// unchanged.
bool IsDefinedFrameType(uint8_t frame_type_field) {
  switch (static_cast<SpdyFrameType>(frame_type_field)) {
    case SpdyFrameType::DATA:
    case SpdyFrameType::HEADERS:
    case SpdyFrameType::PRIORITY:
    case SpdyFrameType::RST_STREAM:
    case SpdyFrameType::SETTINGS:
    case SpdyFrameType::PUSH_PROMISE:
    case SpdyFrameType::PING:
    case SpdyFrameType::GOAWAY:
    case SpdyFrameType::WINDOW_UPDATE:
    case SpdyFrameType::CONTINUATION:
    case SpdyFrameType::ALTSVC:
    case SpdyFrameType::PRIORITY_UPDATE:
    case SpdyFrameType::ACCEPT_CH:
      return true;
  }
  // Not a known enumerator (no default: so the compiler checks exhaustiveness).
  return false;
}
// Converts a wire frame-type byte to SpdyFrameType. Callers must check
// IsDefinedFrameType first; an undefined value trips QUICHE_BUG_IF but is
// still cast through.
SpdyFrameType ParseFrameType(uint8_t frame_type_field) {
  QUICHE_BUG_IF(spdy_bug_22_4, !IsDefinedFrameType(frame_type_field))
      << "Frame type not defined: " << static_cast<int>(frame_type_field);
  return static_cast<SpdyFrameType>(frame_type_field);
}
// Converts a SpdyFrameType to its on-the-wire byte value.
uint8_t SerializeFrameType(SpdyFrameType frame_type) {
  return static_cast<uint8_t>(frame_type);
}
// Checks the stream-id rule for each frame type: stream-level frames (DATA,
// HEADERS, PRIORITY, RST_STREAM, CONTINUATION, PUSH_PROMISE) require a
// nonzero stream id, connection-level frames (GOAWAY, SETTINGS, PING)
// require stream id 0, and all other frame types are accepted on any stream.
bool IsValidHTTP2FrameStreamId(SpdyStreamId current_frame_stream_id,
                               SpdyFrameType frame_type_field) {
  const bool on_connection_stream = (current_frame_stream_id == 0);
  switch (frame_type_field) {
    case SpdyFrameType::DATA:
    case SpdyFrameType::HEADERS:
    case SpdyFrameType::PRIORITY:
    case SpdyFrameType::RST_STREAM:
    case SpdyFrameType::CONTINUATION:
    case SpdyFrameType::PUSH_PROMISE:
      // Must be associated with a specific (nonzero) stream.
      return !on_connection_stream;
    case SpdyFrameType::GOAWAY:
    case SpdyFrameType::SETTINGS:
    case SpdyFrameType::PING:
      // Must apply to the connection as a whole (stream 0).
      return on_connection_stream;
    default:
      return true;
  }
}
// Returns the canonical name of `frame_type` for logging, or
// "UNKNOWN_FRAME_TYPE" for values outside the enum.
const char* FrameTypeToString(SpdyFrameType frame_type) {
  switch (frame_type) {
    case SpdyFrameType::DATA:
      return "DATA";
    case SpdyFrameType::RST_STREAM:
      return "RST_STREAM";
    case SpdyFrameType::SETTINGS:
      return "SETTINGS";
    case SpdyFrameType::PING:
      return "PING";
    case SpdyFrameType::GOAWAY:
      return "GOAWAY";
    case SpdyFrameType::HEADERS:
      return "HEADERS";
    case SpdyFrameType::WINDOW_UPDATE:
      return "WINDOW_UPDATE";
    case SpdyFrameType::PUSH_PROMISE:
      return "PUSH_PROMISE";
    case SpdyFrameType::CONTINUATION:
      return "CONTINUATION";
    case SpdyFrameType::PRIORITY:
      return "PRIORITY";
    case SpdyFrameType::ALTSVC:
      return "ALTSVC";
    case SpdyFrameType::PRIORITY_UPDATE:
      return "PRIORITY_UPDATE";
    case SpdyFrameType::ACCEPT_CH:
      return "ACCEPT_CH";
  }
  return "UNKNOWN_FRAME_TYPE";
}
// Converts a wire settings id into a SpdyKnownSettingsId. Accepts ids in
// [SETTINGS_MIN, SETTINGS_MAX] plus the out-of-band experimental scheduler
// id. Returns false (leaving *setting_id unspecified beyond the cast) for
// any id not covered by the switch.
bool ParseSettingsId(SpdySettingsId wire_setting_id,
                     SpdyKnownSettingsId* setting_id) {
  if (wire_setting_id != SETTINGS_EXPERIMENT_SCHEDULER &&
      (wire_setting_id < SETTINGS_MIN || wire_setting_id > SETTINGS_MAX)) {
    return false;
  }
  *setting_id = static_cast<SpdyKnownSettingsId>(wire_setting_id);
  // The switch re-validates: a value inside the numeric range that is not a
  // named enumerator still fails.
  switch (*setting_id) {
    case SETTINGS_HEADER_TABLE_SIZE:
    case SETTINGS_ENABLE_PUSH:
    case SETTINGS_MAX_CONCURRENT_STREAMS:
    case SETTINGS_INITIAL_WINDOW_SIZE:
    case SETTINGS_MAX_FRAME_SIZE:
    case SETTINGS_MAX_HEADER_LIST_SIZE:
    case SETTINGS_ENABLE_CONNECT_PROTOCOL:
    case SETTINGS_DEPRECATE_HTTP2_PRIORITIES:
    case SETTINGS_EXPERIMENT_SCHEDULER:
      return true;
  }
  return false;
}
// Returns the canonical name of a settings id for logging; unknown ids are
// rendered as "SETTINGS_UNKNOWN_<hex>".
std::string SettingsIdToString(SpdySettingsId id) {
  SpdyKnownSettingsId known_id;
  if (!ParseSettingsId(id, &known_id)) {
    return absl::StrCat("SETTINGS_UNKNOWN_", absl::Hex(uint32_t{id}));
  }
  switch (known_id) {
    case SETTINGS_HEADER_TABLE_SIZE:
      return "SETTINGS_HEADER_TABLE_SIZE";
    case SETTINGS_ENABLE_PUSH:
      return "SETTINGS_ENABLE_PUSH";
    case SETTINGS_MAX_CONCURRENT_STREAMS:
      return "SETTINGS_MAX_CONCURRENT_STREAMS";
    case SETTINGS_INITIAL_WINDOW_SIZE:
      return "SETTINGS_INITIAL_WINDOW_SIZE";
    case SETTINGS_MAX_FRAME_SIZE:
      return "SETTINGS_MAX_FRAME_SIZE";
    case SETTINGS_MAX_HEADER_LIST_SIZE:
      return "SETTINGS_MAX_HEADER_LIST_SIZE";
    case SETTINGS_ENABLE_CONNECT_PROTOCOL:
      return "SETTINGS_ENABLE_CONNECT_PROTOCOL";
    case SETTINGS_DEPRECATE_HTTP2_PRIORITIES:
      return "SETTINGS_DEPRECATE_HTTP2_PRIORITIES";
    case SETTINGS_EXPERIMENT_SCHEDULER:
      return "SETTINGS_EXPERIMENT_SCHEDULER";
  }
  // Unreachable when ParseSettingsId succeeded, but keeps the compiler happy.
  return absl::StrCat("SETTINGS_UNKNOWN_", absl::Hex(uint32_t{id}));
}
// Converts a wire error code to SpdyErrorCode, mapping values beyond
// ERROR_CODE_MAX to INTERNAL_ERROR.
SpdyErrorCode ParseErrorCode(uint32_t wire_error_code) {
  return wire_error_code > ERROR_CODE_MAX
             ? ERROR_CODE_INTERNAL_ERROR
             : static_cast<SpdyErrorCode>(wire_error_code);
}
// Returns the canonical name of `error_code` for logging, or
// "UNKNOWN_ERROR_CODE" for values outside the enum.
const char* ErrorCodeToString(SpdyErrorCode error_code) {
  switch (error_code) {
    case ERROR_CODE_NO_ERROR:
      return "NO_ERROR";
    case ERROR_CODE_PROTOCOL_ERROR:
      return "PROTOCOL_ERROR";
    case ERROR_CODE_INTERNAL_ERROR:
      return "INTERNAL_ERROR";
    case ERROR_CODE_FLOW_CONTROL_ERROR:
      return "FLOW_CONTROL_ERROR";
    case ERROR_CODE_SETTINGS_TIMEOUT:
      return "SETTINGS_TIMEOUT";
    case ERROR_CODE_STREAM_CLOSED:
      return "STREAM_CLOSED";
    case ERROR_CODE_FRAME_SIZE_ERROR:
      return "FRAME_SIZE_ERROR";
    case ERROR_CODE_REFUSED_STREAM:
      return "REFUSED_STREAM";
    case ERROR_CODE_CANCEL:
      return "CANCEL";
    case ERROR_CODE_COMPRESSION_ERROR:
      return "COMPRESSION_ERROR";
    case ERROR_CODE_CONNECT_ERROR:
      return "CONNECT_ERROR";
    case ERROR_CODE_ENHANCE_YOUR_CALM:
      return "ENHANCE_YOUR_CALM";
    case ERROR_CODE_INADEQUATE_SECURITY:
      return "INADEQUATE_SECURITY";
    case ERROR_CODE_HTTP_1_1_REQUIRED:
      return "HTTP_1_1_REQUIRED";
  }
  return "UNKNOWN_ERROR_CODE";
}
// Returns a human-readable name for the write scheduler type; "UNKNOWN" is
// reached only for values outside the enum.
const char* WriteSchedulerTypeToString(WriteSchedulerType type) {
  switch (type) {
    case WriteSchedulerType::LIFO:
      return "LIFO";
    case WriteSchedulerType::SPDY:
      return "SPDY";
    case WriteSchedulerType::HTTP2:
      return "HTTP2";
    case WriteSchedulerType::FIFO:
      return "FIFO";
  }
  return "UNKNOWN";
}
// Returns how many CONTINUATION frames are needed to carry the part of a
// header block that does not fit into the initial HEADERS/PUSH_PROMISE
// frame. `size` is the estimated total frame size and must exceed the
// maximum control frame send size (enforced by the DCHECK).
size_t GetNumberRequiredContinuationFrames(size_t size) {
  QUICHE_DCHECK_GT(size, kHttp2MaxControlFrameSendSize);
  const size_t overflow = size - kHttp2MaxControlFrameSendSize;
  // Payload capacity of a single CONTINUATION frame. Kept unsigned: the
  // original `int` mixed signed/unsigned arithmetic in the division below.
  const size_t payload_size =
      kHttp2MaxControlFrameSendSize - kContinuationFrameMinimumSize;
  // Ceiling division of overflow by payload_size.
  return (overflow - 1) / payload_size + 1;
}
// ALPN/NPN protocol identifier for HTTP/2.
const char* const kHttp2Npn = "h2";
// HTTP/2 pseudo-header field names (RFC 9113, sections 8.3.1-8.3.2).
const char* const kHttp2AuthorityHeader = ":authority";
const char* const kHttp2MethodHeader = ":method";
const char* const kHttp2PathHeader = ":path";
const char* const kHttp2SchemeHeader = ":scheme";
// Extended CONNECT pseudo-header (RFC 8441).
const char* const kHttp2ProtocolHeader = ":protocol";
const char* const kHttp2StatusHeader = ":status";
// Base-class defaults: frames carry no FIN and consume no flow-control
// window unless a subclass overrides these.
bool SpdyFrameIR::fin() const { return false; }
int SpdyFrameIR::flow_control_window_consumed() const { return 0; }
// Frames that support END_STREAM report the stored flag.
bool SpdyFrameWithFinIR::fin() const { return fin_; }
// Takes ownership of the header block by move.
SpdyFrameWithHeaderBlockIR::SpdyFrameWithHeaderBlockIR(
    SpdyStreamId stream_id, Http2HeaderBlock header_block)
    : SpdyFrameWithFinIR(stream_id), header_block_(std::move(header_block)) {}
SpdyFrameWithHeaderBlockIR::~SpdyFrameWithHeaderBlockIR() = default;
// Constructs a DATA frame from a string_view; SetDataDeep (defined
// elsewhere) presumably copies the bytes into owned storage, so `data`
// need not outlive the frame — TODO confirm against SetDataDeep.
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, absl::string_view data)
    : SpdyFrameWithFinIR(stream_id),
      data_(nullptr),
      data_len_(0),
      padded_(false),
      padding_payload_len_(0) {
  SetDataDeep(data);
}
// Convenience overload for NUL-terminated C strings.
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, const char* data)
    : SpdyDataIR(stream_id, absl::string_view(data)) {}
// Takes ownership by moving `data` into heap storage; data_/data_len_
// alias the owned string for the frame's lifetime.
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id, std::string data)
    : SpdyFrameWithFinIR(stream_id),
      data_store_(std::make_unique<std::string>(std::move(data))),
      data_(data_store_->data()),
      data_len_(data_store_->size()),
      padded_(false),
      padding_payload_len_(0) {}
// Constructs a DATA frame with no payload.
SpdyDataIR::SpdyDataIR(SpdyStreamId stream_id)
    : SpdyFrameWithFinIR(stream_id),
      data_(nullptr),
      data_len_(0),
      padded_(false),
      padding_payload_len_(0) {}
SpdyDataIR::~SpdyDataIR() = default;
void SpdyDataIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitData(*this);
}
SpdyFrameType SpdyDataIR::frame_type() const { return SpdyFrameType::DATA; }
// DATA frames consume window for the payload plus, when padded, the 1-byte
// Pad Length field and the padding bytes themselves.
int SpdyDataIR::flow_control_window_consumed() const {
  return padded_ ? 1 + padding_payload_len_ + data_len_ : data_len_;
}
// Total on-the-wire size: frame header plus payload (and padding overhead).
size_t SpdyDataIR::size() const {
  return kFrameHeaderSize +
         (padded() ? 1 + padding_payload_len() + data_len() : data_len());
}
// RST_STREAM: a stream id plus an error code (stored via the base setter).
SpdyRstStreamIR::SpdyRstStreamIR(SpdyStreamId stream_id,
                                 SpdyErrorCode error_code)
    : SpdyFrameIR(stream_id) {
  set_error_code(error_code);
}
SpdyRstStreamIR::~SpdyRstStreamIR() = default;
void SpdyRstStreamIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitRstStream(*this);
}
SpdyFrameType SpdyRstStreamIR::frame_type() const {
  return SpdyFrameType::RST_STREAM;
}
// RST_STREAM frames have a fixed wire size.
size_t SpdyRstStreamIR::size() const { return kRstStreamFrameSize; }
// SETTINGS frames start as non-ACK; callers flip is_ack_ separately.
SpdySettingsIR::SpdySettingsIR() : is_ack_(false) {}
SpdySettingsIR::~SpdySettingsIR() = default;
void SpdySettingsIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitSettings(*this);
}
SpdyFrameType SpdySettingsIR::frame_type() const {
  return SpdyFrameType::SETTINGS;
}
// Frame header plus one fixed-size (id, value) entry per setting.
size_t SpdySettingsIR::size() const {
  return kFrameHeaderSize + values_.size() * kSettingsOneSettingSize;
}
void SpdyPingIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitPing(*this);
}
SpdyFrameType SpdyPingIR::frame_type() const { return SpdyFrameType::PING; }
// PING frames have a fixed wire size.
size_t SpdyPingIR::size() const { return kPingFrameSize; }
// GOAWAY from a string_view: description_ references the caller's bytes —
// NOTE(review): the member's type is declared elsewhere; if it is a view,
// `description` must outlive this frame. Confirm against the header.
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
                           SpdyErrorCode error_code,
                           absl::string_view description)
    : description_(description) {
  set_last_good_stream_id(last_good_stream_id);
  set_error_code(error_code);
}
// Convenience overload for NUL-terminated C strings.
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
                           SpdyErrorCode error_code, const char* description)
    : SpdyGoAwayIR(last_good_stream_id, error_code,
                   absl::string_view(description)) {}
// Owning overload: the description is moved into description_store_ and
// description_ refers to that owned storage.
SpdyGoAwayIR::SpdyGoAwayIR(SpdyStreamId last_good_stream_id,
                           SpdyErrorCode error_code, std::string description)
    : description_store_(std::move(description)),
      description_(description_store_) {
  set_last_good_stream_id(last_good_stream_id);
  set_error_code(error_code);
}
SpdyGoAwayIR::~SpdyGoAwayIR() = default;
void SpdyGoAwayIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitGoAway(*this);
}
SpdyFrameType SpdyGoAwayIR::frame_type() const { return SpdyFrameType::GOAWAY; }
// Fixed GOAWAY fields plus the opaque debug-data payload.
size_t SpdyGoAwayIR::size() const {
  return kGoawayFrameMinimumSize + description_.size();
}
// CONTINUATION frames start without END_HEADERS; the flag is set when the
// frame is known to be the last fragment of the header block.
SpdyContinuationIR::SpdyContinuationIR(SpdyStreamId stream_id)
    : SpdyFrameIR(stream_id), end_headers_(false) {}
SpdyContinuationIR::~SpdyContinuationIR() = default;
void SpdyContinuationIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitContinuation(*this);
}
SpdyFrameType SpdyContinuationIR::frame_type() const {
  return SpdyFrameType::CONTINUATION;
}
// CONTINUATION frames are sized as part of the HEADERS/PUSH_PROMISE frame
// they continue (see SpdyHeadersIR::size), so querying their size directly
// is a caller bug: warn and return 0.
size_t SpdyContinuationIR::size() const {
  // Fixed double negative in the log message ("Shouldn't not call").
  QUICHE_DLOG(WARNING) << "Shouldn't call size() for CONTINUATION frame.";
  return 0;
}
void SpdyHeadersIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitHeaders(*this);
}
SpdyFrameType SpdyHeadersIR::frame_type() const {
  return SpdyFrameType::HEADERS;
}
// Estimates the serialized size of this HEADERS frame, including the
// CONTINUATION frames required once the estimated HPACK-encoded block
// exceeds the maximum control frame send size.
size_t SpdyHeadersIR::size() const {
  size_t size = kHeadersFrameMinimumSize;
  if (padded_) {
    // 1 byte for the Pad Length field plus the padding payload itself.
    size += 1;
    size += padding_payload_len_;
  }
  if (has_priority_) {
    // Priority field: 4-byte stream dependency + 1-byte weight (RFC 7540).
    size += 5;
  }
  // Per-header HPACK overhead estimate; the newer constant is gated behind
  // a reloadable flag whose use is counted here.
  size_t hpack_overhead = kPerHeaderHpackOverheadOld;
  if (add_hpack_overhead_bytes_) {
    QUICHE_RELOADABLE_FLAG_COUNT(http2_add_hpack_overhead_bytes2);
    hpack_overhead = kPerHeaderHpackOverheadNew;
  }
  size +=
      header_block().TotalBytesUsed() + header_block().size() * hpack_overhead;
  if (size > kHttp2MaxControlFrameSendSize) {
    size += GetNumberRequiredContinuationFrames(size) *
            kContinuationFrameMinimumSize;
  }
  return size;
}
void SpdyWindowUpdateIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitWindowUpdate(*this);
}
SpdyFrameType SpdyWindowUpdateIR::frame_type() const {
  return SpdyFrameType::WINDOW_UPDATE;
}
// WINDOW_UPDATE frames have a fixed wire size.
size_t SpdyWindowUpdateIR::size() const { return kWindowUpdateFrameSize; }
void SpdyPushPromiseIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitPushPromise(*this);
}
SpdyFrameType SpdyPushPromiseIR::frame_type() const {
  return SpdyFrameType::PUSH_PROMISE;
}
// Estimated serialized size, including padding overhead and any required
// CONTINUATION frames. Unlike SpdyHeadersIR::size, no per-header HPACK
// overhead is added here.
size_t SpdyPushPromiseIR::size() const {
  size_t size = kPushPromiseFrameMinimumSize;
  if (padded_) {
    // 1 byte for the Pad Length field plus the padding payload itself.
    size += 1;
    size += padding_payload_len_;
  }
  size += header_block().TotalBytesUsed();
  if (size > kHttp2MaxControlFrameSendSize) {
    size += GetNumberRequiredContinuationFrames(size) *
            kContinuationFrameMinimumSize;
  }
  return size;
}
SpdyAltSvcIR::SpdyAltSvcIR(SpdyStreamId stream_id) : SpdyFrameIR(stream_id) {}
SpdyAltSvcIR::~SpdyAltSvcIR() = default;
void SpdyAltSvcIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitAltSvc(*this);
}
SpdyFrameType SpdyAltSvcIR::frame_type() const { return SpdyFrameType::ALTSVC; }
// Computes the frame size by actually serializing the Alt-Svc field value.
// NOTE(review): this allocates and encodes on every call; acceptable if
// size() is not on a hot path.
size_t SpdyAltSvcIR::size() const {
  size_t size = kGetAltSvcFrameMinimumSize;
  size += origin_.length();
  std::string str =
      SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector_);
  size += str.size();
  return size;
}
void SpdyPriorityIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitPriority(*this);
}
SpdyFrameType SpdyPriorityIR::frame_type() const {
  return SpdyFrameType::PRIORITY;
}
// PRIORITY frames have a fixed wire size.
size_t SpdyPriorityIR::size() const { return kPriorityFrameSize; }
void SpdyPriorityUpdateIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitPriorityUpdate(*this);
}
SpdyFrameType SpdyPriorityUpdateIR::frame_type() const {
  return SpdyFrameType::PRIORITY_UPDATE;
}
// Fixed fields plus the variable-length Priority Field Value (RFC 9218).
size_t SpdyPriorityUpdateIR::size() const {
  return kPriorityUpdateFrameMinimumSize + priority_field_value_.size();
}
void SpdyAcceptChIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitAcceptCh(*this);
}
SpdyFrameType SpdyAcceptChIR::frame_type() const {
  return SpdyFrameType::ACCEPT_CH;
}
// Frame header plus, per (origin, value) entry, the entry strings and the
// fixed per-entry length-field overhead.
size_t SpdyAcceptChIR::size() const {
  size_t total_size = kAcceptChFrameMinimumSize;
  for (const AcceptChOriginValuePair& entry : entries_) {
    total_size += entry.origin.size() + entry.value.size() +
                  kAcceptChFramePerEntryOverhead;
  }
  return total_size;
}
void SpdyUnknownIR::Visit(SpdyFrameVisitor* visitor) const {
  return visitor->VisitUnknown(*this);
}
// The raw type byte is reinterpreted as a SpdyFrameType; it may not name a
// known frame type.
SpdyFrameType SpdyUnknownIR::frame_type() const {
  return static_cast<SpdyFrameType>(type());
}
// Frame header plus the opaque payload.
size_t SpdyUnknownIR::size() const {
  return kFrameHeaderSize + payload_.size();
}
// An "unknown" frame consumes flow-control window only when its raw type
// byte happens to be the DATA frame type; every other type consumes none.
int SpdyUnknownIR::flow_control_window_consumed() const {
  const bool is_data = frame_type() == SpdyFrameType::DATA;
  return is_data ? static_cast<int>(payload_.size()) : 0;
}
// Size of the Pad Length field that precedes a padded payload.
const size_t kPadLengthFieldSize = 1;
// Size of a HEADERS frame excluding the encoded header block: frame header,
// optional padding fields, and the optional priority field.
size_t GetHeaderFrameSizeSansBlock(const SpdyHeadersIR& header_ir) {
  size_t min_size = kFrameHeaderSize;
  if (header_ir.padded()) {
    min_size += kPadLengthFieldSize;
    min_size += header_ir.padding_payload_len();
  }
  if (header_ir.has_priority()) {
    // Priority field: 4-byte stream dependency + 1-byte weight (RFC 7540).
    min_size += 5;
  }
  return min_size;
}
// Size of a PUSH_PROMISE frame excluding the encoded header block: the
// fixed fields plus optional padding overhead.
size_t GetPushPromiseFrameSizeSansBlock(
    const SpdyPushPromiseIR& push_promise_ir) {
  size_t min_size = kPushPromiseFrameMinimumSize;
  if (push_promise_ir.padded()) {
    min_size += kPadLengthFieldSize;
    min_size += push_promise_ir.padding_payload_len();
  }
  return min_size;
}
} | #include "quiche/spdy/core/spdy_protocol.h"
#include <iostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
// Pretty-printer used by gtest failure messages: shows either the SPDY/3
// priority or the HTTP/2 (parent, weight, exclusive) triple, depending on
// which representation the precedence holds.
std::ostream& operator<<(std::ostream& os,
                         const SpdyStreamPrecedence precedence) {
  if (precedence.is_spdy3_priority()) {
    os << "SpdyStreamPrecedence[spdy3_priority=" << precedence.spdy3_priority()
       << "]";
  } else {
    os << "SpdyStreamPrecedence[parent_id=" << precedence.parent_id()
       << ", weight=" << precedence.weight()
       << ", is_exclusive=" << precedence.is_exclusive() << "]";
  }
  return os;
}
namespace test {
TEST(SpdyProtocolTest, ClampSpdy3Priority) {
EXPECT_QUICHE_BUG(EXPECT_EQ(7, ClampSpdy3Priority(8)), "Invalid priority: 8");
EXPECT_EQ(kV3LowestPriority, ClampSpdy3Priority(kV3LowestPriority));
EXPECT_EQ(kV3HighestPriority, ClampSpdy3Priority(kV3HighestPriority));
}
TEST(SpdyProtocolTest, ClampHttp2Weight) {
EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MinStreamWeight, ClampHttp2Weight(0)),
"Invalid weight: 0");
EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MaxStreamWeight, ClampHttp2Weight(300)),
"Invalid weight: 300");
EXPECT_EQ(kHttp2MinStreamWeight, ClampHttp2Weight(kHttp2MinStreamWeight));
EXPECT_EQ(kHttp2MaxStreamWeight, ClampHttp2Weight(kHttp2MaxStreamWeight));
}
TEST(SpdyProtocolTest, Spdy3PriorityToHttp2Weight) {
EXPECT_EQ(256, Spdy3PriorityToHttp2Weight(0));
EXPECT_EQ(220, Spdy3PriorityToHttp2Weight(1));
EXPECT_EQ(183, Spdy3PriorityToHttp2Weight(2));
EXPECT_EQ(147, Spdy3PriorityToHttp2Weight(3));
EXPECT_EQ(110, Spdy3PriorityToHttp2Weight(4));
EXPECT_EQ(74, Spdy3PriorityToHttp2Weight(5));
EXPECT_EQ(37, Spdy3PriorityToHttp2Weight(6));
EXPECT_EQ(1, Spdy3PriorityToHttp2Weight(7));
}
TEST(SpdyProtocolTest, Http2WeightToSpdy3Priority) {
EXPECT_EQ(0u, Http2WeightToSpdy3Priority(256));
EXPECT_EQ(0u, Http2WeightToSpdy3Priority(221));
EXPECT_EQ(1u, Http2WeightToSpdy3Priority(220));
EXPECT_EQ(1u, Http2WeightToSpdy3Priority(184));
EXPECT_EQ(2u, Http2WeightToSpdy3Priority(183));
EXPECT_EQ(2u, Http2WeightToSpdy3Priority(148));
EXPECT_EQ(3u, Http2WeightToSpdy3Priority(147));
EXPECT_EQ(3u, Http2WeightToSpdy3Priority(111));
EXPECT_EQ(4u, Http2WeightToSpdy3Priority(110));
EXPECT_EQ(4u, Http2WeightToSpdy3Priority(75));
EXPECT_EQ(5u, Http2WeightToSpdy3Priority(74));
EXPECT_EQ(5u, Http2WeightToSpdy3Priority(38));
EXPECT_EQ(6u, Http2WeightToSpdy3Priority(37));
EXPECT_EQ(6u, Http2WeightToSpdy3Priority(2));
EXPECT_EQ(7u, Http2WeightToSpdy3Priority(1));
}
TEST(SpdyProtocolTest, IsValidHTTP2FrameStreamId) {
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::DATA));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::DATA));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::HEADERS));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::HEADERS));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PRIORITY));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PRIORITY));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::RST_STREAM));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::RST_STREAM));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::CONTINUATION));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::CONTINUATION));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PUSH_PROMISE));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PUSH_PROMISE));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::GOAWAY));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::GOAWAY));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::SETTINGS));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::SETTINGS));
EXPECT_FALSE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::PING));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::PING));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(1, SpdyFrameType::WINDOW_UPDATE));
EXPECT_TRUE(IsValidHTTP2FrameStreamId(0, SpdyFrameType::WINDOW_UPDATE));
}
TEST(SpdyProtocolTest, ParseSettingsId) {
SpdyKnownSettingsId setting_id;
EXPECT_FALSE(ParseSettingsId(0, &setting_id));
EXPECT_TRUE(ParseSettingsId(1, &setting_id));
EXPECT_EQ(SETTINGS_HEADER_TABLE_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(2, &setting_id));
EXPECT_EQ(SETTINGS_ENABLE_PUSH, setting_id);
EXPECT_TRUE(ParseSettingsId(3, &setting_id));
EXPECT_EQ(SETTINGS_MAX_CONCURRENT_STREAMS, setting_id);
EXPECT_TRUE(ParseSettingsId(4, &setting_id));
EXPECT_EQ(SETTINGS_INITIAL_WINDOW_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(5, &setting_id));
EXPECT_EQ(SETTINGS_MAX_FRAME_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(6, &setting_id));
EXPECT_EQ(SETTINGS_MAX_HEADER_LIST_SIZE, setting_id);
EXPECT_FALSE(ParseSettingsId(7, &setting_id));
EXPECT_TRUE(ParseSettingsId(8, &setting_id));
EXPECT_EQ(SETTINGS_ENABLE_CONNECT_PROTOCOL, setting_id);
EXPECT_TRUE(ParseSettingsId(9, &setting_id));
EXPECT_EQ(SETTINGS_DEPRECATE_HTTP2_PRIORITIES, setting_id);
EXPECT_FALSE(ParseSettingsId(10, &setting_id));
EXPECT_FALSE(ParseSettingsId(0xFF44, &setting_id));
EXPECT_TRUE(ParseSettingsId(0xFF45, &setting_id));
EXPECT_EQ(SETTINGS_EXPERIMENT_SCHEDULER, setting_id);
EXPECT_FALSE(ParseSettingsId(0xFF46, &setting_id));
}
TEST(SpdyProtocolTest, SettingsIdToString) {
struct {
SpdySettingsId setting_id;
const std::string expected_string;
} test_cases[] = {
{0, "SETTINGS_UNKNOWN_0"},
{SETTINGS_HEADER_TABLE_SIZE, "SETTINGS_HEADER_TABLE_SIZE"},
{SETTINGS_ENABLE_PUSH, "SETTINGS_ENABLE_PUSH"},
{SETTINGS_MAX_CONCURRENT_STREAMS, "SETTINGS_MAX_CONCURRENT_STREAMS"},
{SETTINGS_INITIAL_WINDOW_SIZE, "SETTINGS_INITIAL_WINDOW_SIZE"},
{SETTINGS_MAX_FRAME_SIZE, "SETTINGS_MAX_FRAME_SIZE"},
{SETTINGS_MAX_HEADER_LIST_SIZE, "SETTINGS_MAX_HEADER_LIST_SIZE"},
{7, "SETTINGS_UNKNOWN_7"},
{SETTINGS_ENABLE_CONNECT_PROTOCOL, "SETTINGS_ENABLE_CONNECT_PROTOCOL"},
{SETTINGS_DEPRECATE_HTTP2_PRIORITIES,
"SETTINGS_DEPRECATE_HTTP2_PRIORITIES"},
{0xa, "SETTINGS_UNKNOWN_a"},
{0xFF44, "SETTINGS_UNKNOWN_ff44"},
{0xFF45, "SETTINGS_EXPERIMENT_SCHEDULER"},
{0xFF46, "SETTINGS_UNKNOWN_ff46"}};
for (auto test_case : test_cases) {
EXPECT_EQ(test_case.expected_string,
SettingsIdToString(test_case.setting_id));
}
}
TEST(SpdyStreamPrecedenceTest, Basic) {
SpdyStreamPrecedence spdy3_prec(2);
EXPECT_TRUE(spdy3_prec.is_spdy3_priority());
EXPECT_EQ(2, spdy3_prec.spdy3_priority());
EXPECT_EQ(kHttp2RootStreamId, spdy3_prec.parent_id());
EXPECT_EQ(Spdy3PriorityToHttp2Weight(2), spdy3_prec.weight());
EXPECT_FALSE(spdy3_prec.is_exclusive());
for (bool is_exclusive : {true, false}) {
SpdyStreamPrecedence h2_prec(7, 123, is_exclusive);
EXPECT_FALSE(h2_prec.is_spdy3_priority());
EXPECT_EQ(Http2WeightToSpdy3Priority(123), h2_prec.spdy3_priority());
EXPECT_EQ(7u, h2_prec.parent_id());
EXPECT_EQ(123, h2_prec.weight());
EXPECT_EQ(is_exclusive, h2_prec.is_exclusive());
}
}
TEST(SpdyStreamPrecedenceTest, Clamping) {
EXPECT_QUICHE_BUG(EXPECT_EQ(7, SpdyStreamPrecedence(8).spdy3_priority()),
"Invalid priority: 8");
EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MinStreamWeight,
SpdyStreamPrecedence(3, 0, false).weight()),
"Invalid weight: 0");
EXPECT_QUICHE_BUG(EXPECT_EQ(kHttp2MaxStreamWeight,
SpdyStreamPrecedence(3, 300, false).weight()),
"Invalid weight: 300");
}
TEST(SpdyStreamPrecedenceTest, Copying) {
SpdyStreamPrecedence prec1(3);
SpdyStreamPrecedence copy1(prec1);
EXPECT_TRUE(copy1.is_spdy3_priority());
EXPECT_EQ(3, copy1.spdy3_priority());
SpdyStreamPrecedence prec2(4, 5, true);
SpdyStreamPrecedence copy2(prec2);
EXPECT_FALSE(copy2.is_spdy3_priority());
EXPECT_EQ(4u, copy2.parent_id());
EXPECT_EQ(5, copy2.weight());
EXPECT_TRUE(copy2.is_exclusive());
copy1 = prec2;
EXPECT_FALSE(copy1.is_spdy3_priority());
EXPECT_EQ(4u, copy1.parent_id());
EXPECT_EQ(5, copy1.weight());
EXPECT_TRUE(copy1.is_exclusive());
copy2 = prec1;
EXPECT_TRUE(copy2.is_spdy3_priority());
EXPECT_EQ(3, copy2.spdy3_priority());
}
TEST(SpdyStreamPrecedenceTest, Equals) {
EXPECT_EQ(SpdyStreamPrecedence(3), SpdyStreamPrecedence(3));
EXPECT_NE(SpdyStreamPrecedence(3), SpdyStreamPrecedence(4));
EXPECT_EQ(SpdyStreamPrecedence(1, 2, false),
SpdyStreamPrecedence(1, 2, false));
EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
SpdyStreamPrecedence(2, 2, false));
EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
SpdyStreamPrecedence(1, 3, false));
EXPECT_NE(SpdyStreamPrecedence(1, 2, false),
SpdyStreamPrecedence(1, 2, true));
SpdyStreamPrecedence spdy3_prec(3);
SpdyStreamPrecedence h2_prec(spdy3_prec.parent_id(), spdy3_prec.weight(),
spdy3_prec.is_exclusive());
EXPECT_NE(spdy3_prec, h2_prec);
}
// Exercises every SpdyDataIR constructor overload. The stray space after
// '(' in the calls below appears to be residue of stripped
// /*stream_id=*/ argument comments — TODO confirm against upstream.
TEST(SpdyDataIRTest, Construct) {
  // Empty string_view: zero length but a non-null data pointer.
  absl::string_view s1;
  SpdyDataIR d1( 1, s1);
  EXPECT_EQ(0u, d1.data_len());
  EXPECT_NE(nullptr, d1.data());
  // C-string overload copies the bytes.
  const char s2[] = "something";
  SpdyDataIR d2( 2, s2);
  EXPECT_EQ(absl::string_view(d2.data(), d2.data_len()), s2);
  EXPECT_NE(absl::string_view(d1.data(), d1.data_len()), s2);
  EXPECT_EQ((int)d1.data_len(), d1.flow_control_window_consumed());
  // const std::string binds to the string_view overload (deep copy).
  const std::string foo = "foo";
  SpdyDataIR d3( 3, foo);
  EXPECT_EQ(foo, d3.data());
  EXPECT_EQ((int)d3.data_len(), d3.flow_control_window_consumed());
  // Mutable lvalue std::string is copied, not moved: the source survives.
  std::string bar = "bar";
  SpdyDataIR d4( 4, bar);
  EXPECT_EQ("bar", bar);
  EXPECT_EQ("bar", absl::string_view(d4.data(), d4.data_len()));
  // Moved std::string is taken over; the source is left empty.
  std::string baz = "the quick brown fox";
  SpdyDataIR d5( 5, std::move(baz));
  EXPECT_EQ("", baz);
  EXPECT_EQ(absl::string_view(d5.data(), d5.data_len()), "the quick brown fox");
  SpdyDataIR d7( 7, "something else");
  EXPECT_EQ(absl::string_view(d7.data(), d7.data_len()), "something else");
  // Padding adds 1 (Pad Length field) + padding bytes to window consumption.
  SpdyDataIR d8( 8, "shawarma");
  d8.set_padding_len(20);
  EXPECT_EQ(28, d8.flow_control_window_consumed());
}
}
// Parses a wire settings id into the known-settings enum. Returns false for
// ids outside [SETTINGS_MIN, SETTINGS_MAX] (except the out-of-band
// experimental scheduler id) and for in-range values with no enumerator
// (e.g. the gap at 7, which falls out of the switch).
// NOTE: *setting_id is written before the switch validates it, so on a
// false return it may hold a value outside the enum.
bool ParseSettingsId(SpdySettingsId wire_setting_id,
                     SpdyKnownSettingsId* setting_id) {
  if (wire_setting_id != SETTINGS_EXPERIMENT_SCHEDULER &&
      (wire_setting_id < SETTINGS_MIN || wire_setting_id > SETTINGS_MAX)) {
    return false;
  }
  *setting_id = static_cast<SpdyKnownSettingsId>(wire_setting_id);
  switch (*setting_id) {
    case SETTINGS_HEADER_TABLE_SIZE:
    case SETTINGS_ENABLE_PUSH:
    case SETTINGS_MAX_CONCURRENT_STREAMS:
    case SETTINGS_INITIAL_WINDOW_SIZE:
    case SETTINGS_MAX_FRAME_SIZE:
    case SETTINGS_MAX_HEADER_LIST_SIZE:
    case SETTINGS_ENABLE_CONNECT_PROTOCOL:
    case SETTINGS_DEPRECATE_HTTP2_PRIORITIES:
    case SETTINGS_EXPERIMENT_SCHEDULER:
      return true;
  }
  return false;
}
} | TEST(SpdyProtocolTest, ParseSettingsId) {
SpdyKnownSettingsId setting_id;
EXPECT_FALSE(ParseSettingsId(0, &setting_id));
EXPECT_TRUE(ParseSettingsId(1, &setting_id));
EXPECT_EQ(SETTINGS_HEADER_TABLE_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(2, &setting_id));
EXPECT_EQ(SETTINGS_ENABLE_PUSH, setting_id);
EXPECT_TRUE(ParseSettingsId(3, &setting_id));
EXPECT_EQ(SETTINGS_MAX_CONCURRENT_STREAMS, setting_id);
EXPECT_TRUE(ParseSettingsId(4, &setting_id));
EXPECT_EQ(SETTINGS_INITIAL_WINDOW_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(5, &setting_id));
EXPECT_EQ(SETTINGS_MAX_FRAME_SIZE, setting_id);
EXPECT_TRUE(ParseSettingsId(6, &setting_id));
EXPECT_EQ(SETTINGS_MAX_HEADER_LIST_SIZE, setting_id);
EXPECT_FALSE(ParseSettingsId(7, &setting_id));
EXPECT_TRUE(ParseSettingsId(8, &setting_id));
EXPECT_EQ(SETTINGS_ENABLE_CONNECT_PROTOCOL, setting_id);
EXPECT_TRUE(ParseSettingsId(9, &setting_id));
EXPECT_EQ(SETTINGS_DEPRECATE_HTTP2_PRIORITIES, setting_id);
EXPECT_FALSE(ParseSettingsId(10, &setting_id));
EXPECT_FALSE(ParseSettingsId(0xFF44, &setting_id));
EXPECT_TRUE(ParseSettingsId(0xFF45, &setting_id));
EXPECT_EQ(SETTINGS_EXPERIMENT_SCHEDULER, setting_id);
EXPECT_FALSE(ParseSettingsId(0xFF46, &setting_id));
} |
#include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
// Transparent negation functor; gives std::negate<void> a distinct name for
// use with the SHLO dispatch helpers.
struct Negate : std::negate<void> {};
// NegateOp is stateless, so creation ignores the (empty) attribute set.
NegateOp Create(NegateOp::Attributes) { return {}; }
// Validates the op before evaluation: propagates the input shape to the
// output, restricts inputs to signed-int / float / per-tensor-quantized
// tensors, and requires matching baseline element types between input and
// output. Returns the first failing check's status.
absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output) {
  SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("negate"), input,
                                               IsSignedIntTensor, IsFloatTensor,
                                               IsQuantizedPerTensorTensor));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSameBaselineType(CheckCtx("negate"), input, output));
  return absl::OkStatus();
}
// Applies element-wise negation to `input`, writing into `output`.
// The DISPATCH_* macros presumably expand to a return for every supported
// element type (TODO confirm in dispatch.h), so the trailing error is
// reached only for tensor types Prepare should already have rejected.
absl::Status Evaluate(NegateOp& op, const Tensor& input, Tensor& output) {
  Negate negate;
  if (input.IsPerTensorQuantized()) {
    // Dequantize -> negate -> requantize, dispatched on the tensor's
    // storage and expressed types.
    DISPATCH_QUANTIZED(
        detail::DequantizeOpQuantizePerTensor,
        input.quantized_per_tensor_element_type().StorageType(),
        input.quantized_per_tensor_element_type().ExpressedType(), negate,
        input, output)
  } else if (IsSignedIntTensor(input) || IsFloatTensor(input)) {
    DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
                       input.tensor_element_type(), negate, input, output);
  }
  return absl::FailedPreconditionError(
      "stablehlo.negate: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/negate.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<NegateOp> {
static std::string Get() { return "Negate"; }
};
namespace {
struct Negate : std::negate<void> {
} negate_ref;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseOpShapePropagationTest,
NegateOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Negate, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<NegateOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<NegateOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Negate, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct NegateTest : ::testing::Test {};
TYPED_TEST_SUITE(NegateTest, ArithmeticTestTypes, TestParamNames);
TYPED_TEST(NegateTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), negate_ref);
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedNegateTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedNegateTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedNegateTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = negate_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | absl::Status Prepare(NegateOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("negate"), input,
IsSignedIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("negate"), input, output));
return absl::OkStatus();
} | TYPED_TEST(NegateTest, ArithmeticTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), negate_ref);
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
TYPED_TEST(QuantizedNegateTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = negate_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(NegateOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
} |
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include <atomic>
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
// Process-wide guard: at most one SingleMachine may be provisioned at a
// time (set in Provision, cleared in Shutdown).
static std::atomic<bool> already_provisioned(false);
// Builds the session options for a local cluster: one CPU device with
// `num_cpu_cores` intra-op threads (and an inter-op pool of the same size),
// plus `num_gpus` GPU devices when requested. A non-positive `timeout_s`
// leaves the per-operation timeout disabled.
SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus)
    : Cluster(timeout_s), expected_init_time_s_(0), closing_(false) {
  VLOG(1) << "Number of CPU cores: " << num_cpu_cores
          << " Number of GPUs: " << num_gpus;
  // Two-thread pool; presumably used to run sessions with a deadline —
  // NOTE(review): the consuming code is outside this chunk, confirm.
  thread_pool_ = std::make_unique<thread::ThreadPool>(
      Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
  (*options_.config.mutable_device_count())["CPU"] = 1;
  if (num_gpus > 0) {
    (*options_.config.mutable_device_count())["GPU"] = num_gpus;
  }
  CHECK_GE(num_cpu_cores, 1);
  options_.config.set_intra_op_parallelism_threads(num_cpu_cores);
  options_.config.add_session_inter_op_thread_pool()->set_num_threads(
      num_cpu_cores);
  if (timeout_s > 0) {
    options_.config.set_operation_timeout_in_ms(timeout_s * 1000);
  }
}
// Best-effort teardown: close the session without waiting (the `false`
// argument) and ignore errors, since destructors must not fail; then
// release the thread pool.
SingleMachine::~SingleMachine() {
  CloseSession(false ).IgnoreError();
  thread_pool_.reset();
}
// Starts a local TF session and records the properties of every visible
// device into devices_. Only one SingleMachine may be provisioned per
// process at a time (guarded by `already_provisioned`).
Status SingleMachine::Provision() {
  if (already_provisioned) {
    return absl::UnavailableError(
        "Can't provision more than one single cluster at a time");
  }
  TF_RETURN_IF_ERROR(ResetSession());
  std::vector<DeviceAttributes> devices;
  TF_RETURN_IF_ERROR(session_->ListDevices(&devices));
  for (const auto& dev : devices) {
    DeviceProperties attr;
    if (dev.device_type() == "CPU") {
      attr = GetLocalCPUInfo();
    } else if (dev.device_type() == "GPU") {
      // Map the TF logical device id to the platform id so the right
      // physical GPU is queried.
      DeviceNameUtils::ParsedName parsed;
      if (!DeviceNameUtils::ParseFullName(dev.name(), &parsed)) {
        return absl::InvalidArgumentError(
            absl::StrCat("Not able to parse GPU device name: ", dev.name()));
      }
      TfDeviceId tf_device_id(parsed.id);
      PlatformDeviceId platform_device_id;
      Status s =
          GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id);
      if (!s.ok()) {
        return absl::UnavailableError(
            absl::StrCat("Unknown TF GPU device with id ", tf_device_id.value(),
                         ": ", s.message()));
      }
      attr = GetLocalGPUInfo(platform_device_id);
    } else if (dev.device_type().find("XLA") == string::npos) {
      // Other non-XLA devices are recorded by type only; XLA devices keep a
      // default-constructed (empty) type.
      attr.set_type(dev.device_type());
    }
    attr.set_memory_size(dev.memory_limit());
    devices_[dev.name()] = attr;
  }
  already_provisioned = true;
  if (cpu_allocator_stats_enabled_) {
    // Start peak-memory tracking from a clean slate.
    TF_RETURN_IF_ERROR(ClearAllocatorStats());
  }
  return absl::OkStatus();
}
// Records the item's init ops, queue runners, and expected init time. When
// the item differs from the last one seen, last_graph_ is reset to nullptr
// so the next Run() recreates the session and re-runs initialization.
Status SingleMachine::Initialize(const GrapplerItem& item) {
  mutex_lock l(this->last_graph_mu_);
  if (last_graph_ != &item.graph || last_graph_id_ != item.id) {
    init_ops_ = item.init_ops;
    expected_init_time_s_ = item.expected_init_time;
    last_graph_ = nullptr;
    queue_runner_defs_ = item.queue_runners;
    last_graph_id_ = item.id;
  }
  return absl::OkStatus();
}
// Stops the session and frees the process-wide provisioning slot so a new
// SingleMachine can be provisioned afterwards.
Status SingleMachine::Shutdown() {
  TF_RETURN_IF_ERROR(ShutdownSession());
  mutex_lock l(this->last_graph_mu_);
  last_graph_ = nullptr;
  already_provisioned = false;
  return absl::OkStatus();
}
// Runs `graph_def` in the provisioned session. The first call with a new
// graph pointer (re)creates the session, runs the item's init ops, starts
// the queue runners, and performs warmup steps; later calls with the same
// graph skip that setup. When `metadata` is non-null, cost-graph data from
// initialization and the queue runners is merged into it.
Status SingleMachine::Run(const GraphDef& graph_def,
                          const std::vector<std::pair<string, Tensor>>& feed,
                          const std::vector<string>& fetch,
                          RunMetadata* metadata) {
  mutex_lock l(this->last_graph_mu_);
  if (last_graph_ != &graph_def) {
    TF_RETURN_IF_ERROR(ResetSession());
    TF_RETURN_IF_ERROR(session_->Create(graph_def));
    if (!init_ops_.empty()) {
      init_metadata_ = RunMetadata();
      // Allow extra time for initialization on top of the step timeout.
      int64_t timeout_s = timeout_s_ + expected_init_time_s_;
      TF_RETURN_IF_ERROR(
          RunWithTimeout({}, init_ops_, &init_metadata_, timeout_s));
      // Initialization is a one-off, so its compute costs must not leak
      // into per-step costs. BUGFIX: iterate by reference — the previous
      // by-value loop cleared the cost on a copy, leaving the stored
      // cost graph untouched.
      for (auto& node : *init_metadata_.mutable_cost_graph()->mutable_node()) {
        node.clear_compute_cost();
      }
      init_metadata_.clear_step_stats();
    }
    // Queue runners cannot use hardware tracing: cap their trace level.
    RunOptions queue_options = run_options_;
    if (queue_options.trace_level() >= RunOptions::HARDWARE_TRACE) {
      queue_options.set_trace_level(RunOptions::SOFTWARE_TRACE);
    }
    for (size_t i = 0; i < queue_runner_defs_.size(); ++i) {
      std::unique_ptr<QueueRunner> queue_runner;
      TF_RETURN_IF_ERROR(QueueRunner::New(queue_runner_defs_[i],
                                          coordinator_.get(), &queue_runner));
      TF_RETURN_IF_ERROR(queue_runner->StartAndCollectCostGraph(session_.get(),
                                                                queue_options));
      TF_RETURN_IF_ERROR(coordinator_->RegisterRunner(std::move(queue_runner)));
      TF_RETURN_IF_ERROR(coordinator_->GetStatus());
    }
    // Warm up so the measured run below reflects steady-state behavior.
    for (int i = 0; i < NumWarmupSteps(); ++i) {
      TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
    }
  }
  if (metadata) {
    TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, metadata));
    // Fold in the queue-runner costs and the (compute-cost-cleared) init
    // costs collected above.
    CostGraphDef queue_costs;
    TF_RETURN_IF_ERROR(coordinator_->ExportCostGraph(&queue_costs));
    MergeCosts(metadata->mutable_cost_graph(), init_metadata_.cost_graph(),
               queue_costs);
  } else {
    TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
  }
  last_graph_ = &graph_def;
  return absl::OkStatus();
}
Status SingleMachine::EnablePeakMemoryStats() {
EnableCPUAllocatorStats();
cpu_allocator_stats_enabled_ = true;
return absl::OkStatus();
}
Status SingleMachine::GetPeakMemoryUsage(
std::unordered_map<string, uint64>* device_peak_memory) const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
device_peak_memory->clear();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
absl::optional<AllocatorStats> stats = allocator->GetStats();
(*device_peak_memory)[device->name()] =
(stats ? stats->peak_bytes_in_use : 0);
}
return absl::OkStatus();
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata) {
return RunWithTimeout(feed, fetch, run_metadata, timeout_s_);
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata,
int64_t timeout_s) {
{
mutex_lock l(close_mu_);
CHECK(!closing_);
}
auto status = std::make_shared<Status>();
auto local_metadata = std::make_shared<RunMetadata>();
const bool executed_in_time = ExecuteWithTimeout(
[this, status, local_metadata, feed, fetch]() {
*status = session_->Run(run_options_, feed, {}, fetch, nullptr,
local_metadata.get());
},
timeout_s * 1000, thread_pool_.get());
if (!executed_in_time) {
return absl::DeadlineExceededError(absl::StrCat(
"Failed to run the graph after ", timeout_s, " seconds, aborting"));
} else if (run_metadata && status->ok()) {
*run_metadata = *local_metadata;
}
return *status;
}
Status SingleMachine::CloseSession(bool use_timeout) {
if (!session_ || !thread_pool_) {
return absl::OkStatus();
}
{
mutex_lock l(close_mu_);
if (!closing_) {
closing_ = true;
}
}
const bool executed_in_time = ExecuteWithTimeout(
[&]() {
if (this->coordinator_) {
this->coordinator_->RequestStop().IgnoreError();
while (!this->coordinator_->AllRunnersStopped()) {
Env::Default()->SleepForMicroseconds(1000000);
}
this->session_->Close().IgnoreError();
this->coordinator_.reset();
} else {
this->session_->Close().IgnoreError();
}
mutex_lock l2(close_mu_);
closing_ = false;
},
use_timeout ? timeout_s_ * 1000 : -1, thread_pool_.get());
if (!executed_in_time) {
return absl::UnavailableError(
absl::StrCat("Failed to close the previous session after ", timeout_s_,
" seconds, aborting"));
}
return absl::OkStatus();
}
Status SingleMachine::ShutdownSession() {
TF_RETURN_IF_ERROR(CloseSession(true ));
auto n = std::make_shared<Notification>();
Env::Default()->SchedClosure([this, n]() {
thread_pool_.reset();
n->Notify();
});
int64_t timeout_us = 1000000ll * timeout_s_;
const bool notified = WaitForNotificationWithTimeout(n.get(), timeout_us);
if (!notified) {
return absl::UnavailableError(absl::StrCat(
"The session is still running graphs after ", timeout_s_, " seconds"));
}
return absl::OkStatus();
}
Status SingleMachine::ResetSession() {
if (session_) {
LOG(INFO) << "Cleaning up previous session";
TF_RETURN_IF_ERROR(ShutdownSession());
session_.reset();
}
LOG(INFO) << "Starting new session";
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
session_.reset(NewSession(options_));
if (!session_) {
return absl::UnknownError("Failed to create session");
}
coordinator_ = std::make_unique<Coordinator>();
device_set_ = std::make_unique<DeviceSet>();
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
for (auto d : device_mgr->ListDevices()) {
device_set_->AddDevice(d);
}
return absl::OkStatus();
}
void SingleMachine::MergeCosts(CostGraphDef* graph_costs,
const CostGraphDef& init_costs,
const CostGraphDef& queue_costs) {
graph_costs->mutable_node()->Reserve(graph_costs->node_size() +
init_costs.node_size() +
queue_costs.node_size());
std::unordered_set<string> nodes_seen;
int queue_costs_id_offset = graph_costs->node_size();
for (const auto& node : graph_costs->node()) {
nodes_seen.insert(node.name());
if (node.id() >= queue_costs_id_offset) {
queue_costs_id_offset = node.id() + 1;
}
}
int init_costs_id_offset = queue_costs_id_offset + queue_costs.node_size();
for (const auto& node : queue_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + queue_costs_id_offset);
if (new_node->id() >= init_costs_id_offset) {
init_costs_id_offset = new_node->id() + 1;
}
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
queue_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += queue_costs_id_offset;
}
}
for (const auto& node : init_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + init_costs_id_offset);
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
init_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += init_costs_id_offset;
}
}
}
Status SingleMachine::ClearAllocatorStats() const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
if (!allocator->ClearStats()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Clearing allocation stats is not supported for ",
device->name()));
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/single_machine.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class SingleMachineTest : public ::testing::Test {
public:
void SetUp() override {
#if TENSORFLOW_USE_ROCM
int timeout_s = 10;
#else
int timeout_s = 5;
#endif
#ifdef THREAD_SANITIZER
timeout_s *= 5;
#endif
cluster_ = std::make_unique<SingleMachine>(timeout_s, 3 ,
0 );
TF_CHECK_OK(cluster_->EnablePeakMemoryStats());
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override {
if (cluster_) {
TF_CHECK_OK(cluster_->Shutdown());
}
cluster_.reset();
}
protected:
std::unique_ptr<SingleMachine> cluster_;
};
TEST_F(SingleMachineTest, ClusterType) {
CHECK_EQ("single_machine", cluster_->type());
}
TEST_F(SingleMachineTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
const int64_t start_micros = Env::Default()->NowMicros();
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
const int64_t run_duration_micros =
Env::Default()->NowMicros() - start_micros;
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
EXPECT_LE(8, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
EXPECT_LE(0, node.compute_cost());
EXPECT_GE(run_duration_micros, node.compute_cost());
}
}
TEST_F(SingleMachineTest, Queue) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, true,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
}
TEST_F(SingleMachineTest, MultipleItems) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
for (int i = 0; i < 3; ++i) {
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata1;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata1));
RunMetadata metadata2;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata2));
EXPECT_LE(6, metadata1.cost_graph().node_size());
for (const auto& node : metadata1.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos ||
node.name() == "queue") {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
}
for (int i = 0; i < metadata1.cost_graph().node_size(); ++i) {
metadata1.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata1.clear_step_stats();
}
for (int i = 0; i < metadata2.cost_graph().node_size(); ++i) {
metadata2.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata2.clear_step_stats();
}
string s1;
::tensorflow::protobuf::TextFormat::PrintToString(metadata1, &s1);
string s2;
::tensorflow::protobuf::TextFormat::PrintToString(metadata2, &s2);
EXPECT_EQ(s1, s2);
}
}
TEST_F(SingleMachineTest, GraphOptimizations) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Const(root.WithOpName("zero"), 0.0f, {2, 3});
auto one = ops::Const(root.WithOpName("one"), 1.0f, {2, 3});
auto add = ops::Add(root.WithOpName("add"), zero, one);
auto square = ops::Square(root.WithOpName("square"), add);
auto new_shape = ops::Const(root.WithOpName("new_shape"), {3, -1}, {2});
auto reshaped = ops::Reshape(root.WithOpName("reshaped"), square, new_shape);
auto final_shape = ops::Shape(root.WithOpName("final_shape"), reshaped);
auto expected_shape =
ops::Const(root.WithOpName("expected_shape"), {3, 2}, {2});
auto valid =
ops::Equal(root.WithOpName("valid"), final_shape, expected_shape);
auto all_dims = ops::Const(root.WithOpName("all_dims"), {0}, {1});
auto all_valid = ops::All(root.WithOpName("all_valid"), valid, all_dims);
auto assert_valid = ops::Assert(root.WithOpName("assert_valid"), all_valid,
{final_shape.output});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("assert_valid");
for (auto& node : *item.graph.mutable_node()) {
node.set_device("/cpu:0");
}
TF_CHECK_OK(cluster_->Shutdown());
cluster_->DisableOptimizer(true);
TF_CHECK_OK(cluster_->Provision());
RunMetadata metadata;
TF_CHECK_OK(cluster_->Initialize(item));
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::set<string> cost_nodes;
for (const auto& node : metadata.cost_graph().node()) {
#ifdef INTEL_MKL
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
cost_nodes.insert(node.name());
#else
if (node.name()[0] != '_') {
cost_nodes.insert(node.name());
}
#endif
}
const std::set<string> expected_cost_nodes = {
"zero", "one", "add", "square",
"new_shape", "reshaped", "final_shape", "expected_shape",
"valid", "all_dims", "all_valid", "assert_valid"};
EXPECT_EQ(expected_cost_nodes, cost_nodes);
}
TEST_F(SingleMachineTest, TimeOuts) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q = ops::FIFOQueue(root.WithOpName("queue"), {DataType::DT_INT32});
auto dequeue =
ops::QueueDequeue(root.WithOpName("dequeue"), q, {DataType::DT_INT32});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("dequeue");
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
Status s1 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s1));
Status s2 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s2));
}
static void RunInfiniteTFLoop() {
GrapplerItem item;
NodeDef* shp = item.graph.add_node();
shp->set_name("shape");
shp->set_op("Const");
(*shp->mutable_attr())["dtype"].set_type(DT_INT32);
Tensor shp_tensor(DT_INT32, TensorShape({1}));
shp_tensor.flat<int32>()(0) = 1;
shp_tensor.AsProtoTensorContent(
(*shp->mutable_attr())["value"].mutable_tensor());
NodeDef* r = item.graph.add_node();
r->set_name("random");
r->set_op("RandomUniform");
(*r->mutable_attr())["dtype"].set_type(DT_FLOAT);
(*r->mutable_attr())["T"].set_type(DT_INT32);
*r->add_input() = "shape";
NodeDef* e = item.graph.add_node();
e->set_name("while/Enter");
e->set_op("Enter");
(*e->mutable_attr())["T"].set_type(DT_FLOAT);
(*e->mutable_attr())["frame_name"].set_s("while/while/");
*e->add_input() = "random";
NodeDef* m = item.graph.add_node();
m->set_name("while/Merge");
m->set_op("Merge");
(*m->mutable_attr())["T"].set_type(DT_FLOAT);
(*m->mutable_attr())["N"].set_i(2);
*m->add_input() = "while/Enter";
*m->add_input() = "while/NextIteration";
NodeDef* t = item.graph.add_node();
t->set_name("always_true");
t->set_op("Const");
(*t->mutable_attr())["dtype"].set_type(DT_BOOL);
*t->add_input() = "^while/Merge";
Tensor true_tensor(DT_BOOL, TensorShape());
true_tensor.flat<bool>()(0) = true;
true_tensor.AsProtoTensorContent(
(*t->mutable_attr())["value"].mutable_tensor());
NodeDef* c = item.graph.add_node();
c->set_name("while/LoopCond");
c->set_op("LoopCond");
*c->add_input() = "always_true";
NodeDef* s = item.graph.add_node();
s->set_name("while/Switch");
(*s->mutable_attr())["T"].set_type(DT_FLOAT);
s->set_op("Switch");
*s->add_input() = "while/Merge";
*s->add_input() = "while/LoopCond";
NodeDef* i = item.graph.add_node();
i->set_name("while/Identity");
i->set_op("Identity");
(*i->mutable_attr())["T"].set_type(DT_FLOAT);
*i->add_input() = "while/Switch:1";
NodeDef* n = item.graph.add_node();
n->set_name("while/NextIteration");
n->set_op("NextIteration");
(*n->mutable_attr())["T"].set_type(DT_FLOAT);
*n->add_input() = "while/Identity";
NodeDef* x = item.graph.add_node();
x->set_name("while/Exit");
x->set_op("Exit");
(*x->mutable_attr())["T"].set_type(DT_FLOAT);
*x->add_input() = "while/Switch";
item.fetch.push_back("while/Exit");
SingleMachine cluster(5, 3, 0);
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
Status s1 = cluster.Run(item.graph, item.feed, item.fetch, nullptr);
if (!errors::IsDeadlineExceeded(s1)) {
LOG(ERROR) << "Expected 'deadline exceeded' error, got " << s1;
_exit(1);
}
Status s2 = cluster.Shutdown();
if (!errors::IsUnavailable(s2)) {
LOG(ERROR) << "Expected 'unavailable' error, got " << s2;
_exit(2);
}
_exit(0);
}
TEST_F(SingleMachineTest, InfiniteLoops) {
#if !(TENSORFLOW_USE_ROCM)
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EXIT(RunInfiniteTFLoop(), ::testing::ExitedWithCode(0), ".*");
#endif
}
TEST_F(SingleMachineTest, InitializationMemory) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
int batch_size = 10;
Output x =
ops::RandomNormal(s.WithOpName("x"), {batch_size, 1}, DataType::DT_FLOAT);
Output v = ops::Variable(s.WithOpName("v"), TensorShape({batch_size, 1}),
DataType::DT_FLOAT);
Output init = ops::Assign(s.WithOpName("init"), v, x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.init_ops.push_back(init.name());
item.fetch.push_back(v.name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found = false;
for (const auto& node : metadata.cost_graph().node()) {
found |= (node.name() == NodeName(init.name()));
}
EXPECT_TRUE(found);
}
namespace {
template <class T>
inline void SetNodeAttr(const string& key, const T& value, NodeDef* node) {
AttrValue attr_value;
SetAttrValue(value, &attr_value);
auto* attr_map = node->mutable_attr();
(*attr_map)[key] = attr_value;
}
template <>
inline void SetNodeAttr(const string& key, const Tensor& tensor,
NodeDef* node) {
TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
SetNodeAttr(key, tensor_proto, node);
}
}
TEST_F(SingleMachineTest, PersistentMemory) {
GrapplerItem item;
const DataType key_dtype = DT_INT64;
const DataType data_dtype = DT_INT64;
NodeDef* hashtable_node = item.graph.add_node();
hashtable_node->set_op("HashTable");
hashtable_node->set_name("hash_table");
SetNodeAttr("key_dtype", key_dtype, hashtable_node);
SetNodeAttr("value_dtype", data_dtype, hashtable_node);
NodeDef* keys_node = item.graph.add_node();
keys_node->set_op("Const");
keys_node->set_name("table_keys");
SetNodeAttr("dtype", key_dtype, keys_node);
Tensor keys(key_dtype, TensorShape{2});
keys.vec<int64_t>()(0) = 123;
keys.vec<int64_t>()(1) = 321;
SetNodeAttr("value", keys, keys_node);
NodeDef* values_node = item.graph.add_node();
values_node->set_op("Const");
values_node->set_name("table_values");
SetNodeAttr("dtype", data_dtype, values_node);
Tensor values(data_dtype, TensorShape{2});
values.vec<int64_t>()(0) = 789;
values.vec<int64_t>()(1) = 987;
SetNodeAttr("value", values, values_node);
NodeDef* init_table_node = item.graph.add_node();
init_table_node->set_op("InitializeTable");
init_table_node->set_name("initialize_table");
SetNodeAttr("Tkey", key_dtype, init_table_node);
SetNodeAttr("Tval", data_dtype, init_table_node);
*init_table_node->add_input() = "hash_table";
*init_table_node->add_input() = "table_keys";
*init_table_node->add_input() = "table_values";
item.init_ops.push_back(init_table_node->name());
NodeDef* query_node = item.graph.add_node();
query_node->set_op("Const");
query_node->set_name("query");
SetNodeAttr("dtype", key_dtype, query_node);
Tensor query(key_dtype, TensorShape({}));
query.flat<int64_t>()(0) = 0;
SetNodeAttr("value", query, query_node);
NodeDef* default_value_node = item.graph.add_node();
default_value_node->set_op("Const");
default_value_node->set_name("default_table_value");
SetNodeAttr("dtype", data_dtype, default_value_node);
Tensor dflt(data_dtype, TensorShape({}));
dflt.flat<int64_t>()(0) = 456;
SetNodeAttr("value", dflt, default_value_node);
NodeDef* lookup_node = item.graph.add_node();
lookup_node->set_op("LookupTableFind");
lookup_node->set_name("table_lookup");
SetNodeAttr("Tin", key_dtype, lookup_node);
SetNodeAttr("Tout", data_dtype, lookup_node);
*lookup_node->add_input() = "hash_table";
*lookup_node->add_input() = "query";
*lookup_node->add_input() = "default_table_value";
item.fetch.push_back(lookup_node->name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found_table_init = false;
bool found_hashtable = false;
for (const auto& node : metadata.cost_graph().node()) {
if (node.name() == "hash_table") {
found_hashtable = true;
EXPECT_EQ(0, node.persistent_memory_size());
} else if (node.name() == "initialize_table") {
found_table_init = true;
EXPECT_LE(4 * sizeof(int64_t), node.persistent_memory_size());
}
}
EXPECT_TRUE(found_table_init);
EXPECT_TRUE(found_hashtable);
}
GrapplerItem CreateGrapplerItemWithResourceMemory() {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), TensorShape({128, 256}),
DataType::DT_FLOAT);
Output a_init =
ops::RandomNormal(s.WithOpName("a/init"), {128, 256}, DataType::DT_FLOAT);
Output a_init_assign = ops::Assign(s.WithOpName("a/init/assign"), a, a_init);
Output b =
ops::VarHandleOp(s.WithOpName("b"), DataType::DT_FLOAT, {256, 512});
Output b_read =
ops::ReadVariableOp(s.WithOpName("b/read"), b, DataType::DT_FLOAT);
Output b_init =
ops::RandomNormal(s.WithOpName("b/init"), {256, 512}, DataType::DT_FLOAT);
auto b_init_assign =
ops::AssignVariableOp(s.WithOpName("b/init/assign"), b, b_init);
ops::FIFOQueue queue(s.WithOpName("queue"), {DataType::DT_STRING});
Output some_string =
ops::Const(s.WithOpName("some_string"), string("nothing"));
ops::QueueEnqueue enqueue(s.WithOpName("enqueue"), queue, {some_string});
ops::QueueDequeue dequeue(s.WithOpName("dequeue"), queue,
{DataType::DT_STRING});
ops::IdentityReader reader(s.WithOpName("identity_reader"));
ops::ReaderRead read(s.WithOpName("read_from_queue"), reader, queue);
Output var_mul = ops::MatMul(s.WithOpName("var_matmul"), a, b_read);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
QueueRunnerDef queue_runner;
queue_runner.set_queue_name("queue");
*queue_runner.add_enqueue_op_name() = "enqueue";
item.queue_runners.push_back(queue_runner);
item.init_ops.push_back("a/init/assign");
item.init_ops.push_back("b/init/assign");
item.fetch.push_back("var_matmul");
item.fetch.push_back("dequeue");
return item;
}
#if defined(PLATFORM_GOOGLE)
TEST_F(SingleMachineTest, ReleaseMemoryAfterDestruction) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
std::unordered_map<string, uint64> device_peak_memory_before;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_before));
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
EXPECT_EQ(device_peak_memory.size(), 1);
EXPECT_GT(device_peak_memory.begin()->second, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
std::unordered_map<string, uint64> device_peak_memory_after;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_after));
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_EQ(device_peak_memory_after.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
EXPECT_LT(device_peak_memory_after.begin()->second, 400);
}
TEST_F(SingleMachineTest, PeakMemory) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
uint64 cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_GT(cpu_memory, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
device_peak_memory.clear();
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
TF_CHECK_OK(cluster_->Shutdown());
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_LT(cpu_memory, 200);
}
TEST_F(SingleMachineTest, PeakMemoryStatsNotEnabled) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Shutdown());
cluster_.reset();
SingleMachine cluster(60 , 3 ,
0 );
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
std::unordered_map<string, uint64> device_peak_memory;
Status s = cluster.GetPeakMemoryUsage(&device_peak_memory);
TF_CHECK_OK(cluster.Shutdown());
ASSERT_FALSE(s.ok());
EXPECT_TRUE(errors::IsInvalidArgument(s));
}
#endif
}
}
} | Status SingleMachine::Shutdown() {
TF_RETURN_IF_ERROR(ShutdownSession());
mutex_lock l(this->last_graph_mu_);
last_graph_ = nullptr;
already_provisioned = false;
return absl::OkStatus();
} | |
#include "quiche/quic/core/quic_packet_number.h"
#include <algorithm>
#include <limits>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace quic {
// Resets this packet number to the uninitialized sentinel value.
void QuicPacketNumber::Clear() {
  packet_number_ = UninitializedPacketNumber();
}
// Raises this packet number to |new_value| if the latter is larger.
// An uninitialized |new_value| is ignored; if |this| is uninitialized,
// it simply adopts |new_value|.
void QuicPacketNumber::UpdateMax(QuicPacketNumber new_value) {
  if (!new_value.IsInitialized()) {
    return;
  }
  packet_number_ = IsInitialized()
                       ? std::max(packet_number_, new_value.ToUint64())
                       : new_value.ToUint64();
}
// Returns a value suitable for hashing; must not be called on an
// uninitialized packet number (debug-checked).
uint64_t QuicPacketNumber::Hash() const {
  QUICHE_DCHECK(IsInitialized());
  return packet_number_;
}
// Returns the raw numeric packet number; must not be called on an
// uninitialized packet number (debug-checked).
uint64_t QuicPacketNumber::ToUint64() const {
  QUICHE_DCHECK(IsInitialized());
  return packet_number_;
}
// True unless this object still holds the reserved "uninitialized"
// sentinel value.
bool QuicPacketNumber::IsInitialized() const {
  return packet_number_ != UninitializedPacketNumber();
}
// Pre-increment. Debug builds verify the value is initialized and far
// enough from the top of the uint64 range that incrementing is safe.
QuicPacketNumber& QuicPacketNumber::operator++() {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_LT(ToUint64(), std::numeric_limits<uint64_t>::max() - 1);
#endif
  ++packet_number_;
  return *this;
}
// Post-increment: returns the value held before the increment.
QuicPacketNumber QuicPacketNumber::operator++(int) {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_LT(ToUint64(), std::numeric_limits<uint64_t>::max() - 1);
#endif
  QuicPacketNumber prior(*this);
  ++packet_number_;
  return prior;
}
// Pre-decrement. Debug builds verify the value is initialized and at
// least 1, so the decrement cannot underflow into the sentinel.
QuicPacketNumber& QuicPacketNumber::operator--() {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_GE(ToUint64(), 1UL);
#endif
  --packet_number_;
  return *this;
}
// Post-decrement: returns the value held before the decrement.
QuicPacketNumber QuicPacketNumber::operator--(int) {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_GE(ToUint64(), 1UL);
#endif
  QuicPacketNumber prior(*this);
  --packet_number_;
  return prior;
}
// Advances this packet number by |delta|; debug builds verify the value
// is initialized and that the addition cannot overflow uint64.
QuicPacketNumber& QuicPacketNumber::operator+=(uint64_t delta) {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_GT(std::numeric_limits<uint64_t>::max() - ToUint64(), delta);
#endif
  packet_number_ += delta;
  return *this;
}
// Moves this packet number back by |delta|; debug builds verify the
// value is initialized and that the subtraction cannot underflow.
QuicPacketNumber& QuicPacketNumber::operator-=(uint64_t delta) {
#ifndef NDEBUG
  QUICHE_DCHECK(IsInitialized());
  QUICHE_DCHECK_GE(ToUint64(), delta);
#endif
  packet_number_ -= delta;
  return *this;
}
// Human-readable representation: the decimal value, or "uninitialized"
// when no value has been set.
std::string QuicPacketNumber::ToString() const {
  return IsInitialized() ? absl::StrCat(ToUint64())
                         : std::string("uninitialized");
}
// Streams the same text produced by ToString().
std::ostream& operator<<(std::ostream& os, const QuicPacketNumber& p) {
  return os << p.ToString();
}
} | #include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
// Exercises initialization state, UpdateMax semantics, and Clear().
TEST(QuicPacketNumberTest, BasicTest) {
  QuicPacketNumber uninit;
  EXPECT_FALSE(uninit.IsInitialized());
  QuicPacketNumber pn(10);
  EXPECT_TRUE(pn.IsInitialized());
  EXPECT_EQ(10u, pn.ToUint64());
  EXPECT_EQ(10u, pn.Hash());
  // UpdateMax with an uninitialized argument is a no-op.
  pn.UpdateMax(uninit);
  EXPECT_EQ(10u, pn.ToUint64());
  // A smaller value does not lower the maximum.
  pn.UpdateMax(QuicPacketNumber(9));
  EXPECT_EQ(10u, pn.ToUint64());
  // A larger value raises it.
  pn.UpdateMax(QuicPacketNumber(11));
  EXPECT_EQ(11u, pn.ToUint64());
  pn.Clear();
  EXPECT_FALSE(pn.IsInitialized());
  // After Clear(), UpdateMax simply adopts the new value.
  pn.UpdateMax(QuicPacketNumber(9));
  EXPECT_EQ(9u, pn.ToUint64());
  // Zero is a valid, initialized packet number.
  QuicPacketNumber zero(0);
  EXPECT_TRUE(zero.IsInitialized());
  EXPECT_EQ(0u, zero.ToUint64());
  EXPECT_EQ(0u, zero.Hash());
  zero.Clear();
  EXPECT_FALSE(zero.IsInitialized());
}
// Verifies pre/post increment and decrement, including around zero.
TEST(QuicPacketNumberTest, Operators) {
  QuicPacketNumber num(100);
  // Post-increment/decrement return the prior value.
  EXPECT_EQ(QuicPacketNumber(100), num++);
  EXPECT_EQ(QuicPacketNumber(101), num);
  EXPECT_EQ(QuicPacketNumber(101), num--);
  EXPECT_EQ(QuicPacketNumber(100), num);
  // Pre-increment/decrement return the updated value.
  EXPECT_EQ(QuicPacketNumber(101), ++num);
  EXPECT_EQ(QuicPacketNumber(100), --num);
  QuicPacketNumber num3(0);
  EXPECT_EQ(QuicPacketNumber(0), num3++);
  EXPECT_EQ(QuicPacketNumber(1), num3);
  EXPECT_EQ(QuicPacketNumber(2), ++num3);
  EXPECT_EQ(QuicPacketNumber(2), num3--);
  EXPECT_EQ(QuicPacketNumber(1), num3);
  EXPECT_EQ(QuicPacketNumber(0), --num3);
}
}
}
} | uint64_t QuicPacketNumber::Hash() const {
QUICHE_DCHECK(IsInitialized());
return packet_number_;
} | #include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
// Exercises initialization state, UpdateMax semantics, and Clear().
TEST(QuicPacketNumberTest, BasicTest) {
  QuicPacketNumber num;
  EXPECT_FALSE(num.IsInitialized());
  QuicPacketNumber num2(10);
  EXPECT_TRUE(num2.IsInitialized());
  EXPECT_EQ(10u, num2.ToUint64());
  EXPECT_EQ(10u, num2.Hash());
  // UpdateMax with an uninitialized argument is a no-op.
  num2.UpdateMax(num);
  EXPECT_EQ(10u, num2.ToUint64());
  // A smaller value does not lower the maximum.
  num2.UpdateMax(QuicPacketNumber(9));
  EXPECT_EQ(10u, num2.ToUint64());
  // A larger value raises it.
  num2.UpdateMax(QuicPacketNumber(11));
  EXPECT_EQ(11u, num2.ToUint64());
  num2.Clear();
  EXPECT_FALSE(num2.IsInitialized());
  // After Clear(), UpdateMax simply adopts the new value.
  num2.UpdateMax(QuicPacketNumber(9));
  EXPECT_EQ(9u, num2.ToUint64());
  // Zero is a valid, initialized packet number.
  QuicPacketNumber num4(0);
  EXPECT_TRUE(num4.IsInitialized());
  EXPECT_EQ(0u, num4.ToUint64());
  EXPECT_EQ(0u, num4.Hash());
  num4.Clear();
  EXPECT_FALSE(num4.IsInitialized());
}
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include <cassert>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <variant>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
namespace mlir {
namespace interpreter {
namespace {
// Maps each supported element type to its MLIR-style type spelling.
// The argument value is ignored; overload resolution on a
// default-constructed T{} selects the right string.
struct TypeStr {
  static std::string_view Get(bool) { return "i1"; }
  static std::string_view Get(int64_t) { return "i64"; }
  static std::string_view Get(int32_t) { return "i32"; }
  static std::string_view Get(int16_t) { return "i16"; }
  static std::string_view Get(int8_t) { return "i8"; }
  static std::string_view Get(uint64_t) { return "ui64"; }
  static std::string_view Get(uint32_t) { return "ui32"; }
  static std::string_view Get(uint16_t) { return "ui16"; }
  static std::string_view Get(uint8_t) { return "ui8"; }
  static std::string_view Get(float) { return "f32"; }
  static std::string_view Get(double) { return "f64"; }
  static std::string_view Get(std::complex<float>) { return "complex<f32>"; }
  static std::string_view Get(std::complex<double>) { return "complex<f64>"; }
};
// std::visit functor that renders any InterpreterValue alternative to `os`:
// tensors/memrefs as "<type>: [nested, lists]", tuples as "(a, b)", and
// scalars as "<type>: value".
struct InterpreterValuePrinter {
  llvm::raw_ostream& os;
  // Prints a tensor/memref header (sizes, optional trailing vector dims,
  // element type) followed by its elements as nested bracketed lists.
  template <typename T>
  void operator()(const TensorOrMemref<T>& t) {
    if (!t.buffer) {
      os << "Memref: null";
      return;
    }
    if (t.view.is_vector) {
      os << "vector<";
    } else {
      os << "TensorOrMemref<";
    }
    ArrayRef<int64_t> sizes = t.view.sizes;
    // Tensor dims first, excluding any trailing vector dims.
    for (int64_t size : sizes.drop_back(t.view.num_vector_dims.value_or(0))) {
      os << size << "x";
    }
    if (t.view.num_vector_dims) {
      // The trailing dims form an inner vector type, e.g. 4xvector<2xi64>.
      os << "vector<";
      for (int64_t size : sizes.take_back(*t.view.num_vector_dims)) {
        os << size << "x";
      }
      os << TypeStr::Get(T{}) << ">>: ";
    } else {
      os << TypeStr::Get(T{}) << ">: ";
    }
    // Recursively walk every index combination, emitting one bracket level
    // per dimension and the scalar at full depth.
    SmallVector<int64_t> indices(t.view.Rank() +
                                 t.view.num_vector_dims.value_or(0));
    std::function<void(int64_t)> print;
    print = [&](int64_t dim) {
      if (dim == indices.size()) {
        PrintScalar(t.at(indices));
      } else {
        os << "[";
        for (int64_t i = 0; i < t.view.sizes[dim]; ++i) {
          if (i > 0) os << ", ";
          indices[dim] = i;
          print(dim + 1);
        }
        os << "]";
      }
    };
    if (t.buffer->Deallocated()) {
      os << "<<deallocated>>";
    } else {
      print(0);
    }
  }
  // Tuples print as a parenthesized, comma-separated list of their elements.
  void operator()(const Tuple& t) {
    os << "(";
    bool first = true;
    for (const auto& v : t.values) {
      if (!first) os << ", ";
      first = false;
      v->Print(os);
    }
    os << ")";
  }
  // Fallback for plain scalars: "<type>: value".
  template <typename T>
  void operator()(const T& t) {
    os << TypeStr::Get(t) << ": ";
    PrintScalar(t);
  }
  template <typename T>
  void PrintScalar(const T& v) {
    os << v;
  }
  // Complex numbers print as "a+bi" / "a-bi".
  template <typename T>
  void PrintScalar(const std::complex<T>& v) {
    os << v.real() << (v.imag() >= 0 ? "+" : "") << v.imag() << "i";
  }
  void PrintScalar(bool v) { os << (v ? "true" : "false"); }
  // 8-bit ints would otherwise print as characters; widen to int first.
  void PrintScalar(int8_t v) { os << (int)v; }
  void PrintScalar(uint8_t v) { os << (int)v; }
};
}
// Renders this value to `os` using InterpreterValuePrinter, dispatching on
// the active variant alternative.
void InterpreterValue::Print(llvm::raw_ostream& os) const {
  std::visit(InterpreterValuePrinter{os}, storage);
}
std::string InterpreterValue::ToString() const {
std::string buf;
llvm::raw_string_ostream os(buf);
Print(os);
return buf;
}
// Returns the element at `indices`: for tensors with trailing vector dims the
// whole inner vector, for plain tensors the scalar, and for scalars the value
// itself (indices are ignored). Extracting from tuples is unsupported.
InterpreterValue InterpreterValue::ExtractElement(
    llvm::ArrayRef<int64_t> indices) const {
  return std::visit(
      [&](auto& it) -> InterpreterValue {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          if (it.view.num_vector_dims) {
            return {it.VectorAt(indices)};
          } else {
            return {it.at(indices)};
          }
        } else if constexpr (std::is_same_v<T, Tuple>) {
          llvm_unreachable("extracting from tuples is unsupported");
        } else {
          return {it};
        }
      },
      storage);
}
// Writes `value` at `indices`. For tensors of vectors this copies the whole
// inner vector element-by-element into the subview; for plain tensors it
// stores the scalar; for scalar storage it overwrites the scalar. `value`
// must hold the same alternative/element type as this value.
void InterpreterValue::InsertElement(llvm::ArrayRef<int64_t> indices,
                                     const InterpreterValue& value) {
  std::visit(
      [&](auto& it) {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          if (it.view.num_vector_dims) {
            auto subview = it.VectorAt(indices);
            const auto& values = std::get<T>(value.storage);
            assert(values.view.sizes == subview.view.sizes &&
                   "mismatched sizes");
            for (const auto& index : subview.view.Indices()) {
              subview.at(index) = values.at(index);
            }
          } else {
            it.at(indices) = std::get<typename T::element_type>(value.storage);
          }
        } else if constexpr (std::is_same_v<T, Tuple>) {
          llvm_unreachable("inserting into tuples is unsupported");
        } else {
          it = std::get<T>(value.storage);
        }
      },
      storage);
}
// Populates this value by calling `f` once per (tensor) index and storing the
// result. For tensors of vectors, `f` receives only the tensor indices and
// must return a matching vector. For scalar storage, `f` is called once with
// empty indices. Zero-sized tensors never invoke `f`.
void InterpreterValue::Fill(
    const std::function<InterpreterValue(llvm::ArrayRef<int64_t> indices)>& f) {
  std::visit(
      [&](auto& it) {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          for (const auto& indices : it.view.Indices()) {
            if (it.view.num_vector_dims) {
              auto subview = it.VectorAt(indices);
              auto value = std::get<T>(f(indices).storage);
              for (const auto& index : subview.view.Indices()) {
                subview.at(index) = value.at(index);
              }
            } else {
              it.at(indices) =
                  std::get<typename T::element_type>(f(indices).storage);
            }
          }
        } else if constexpr (std::is_same_v<T, Tuple>) {
          llvm_unreachable("Filling tuples is unsupported");
        } else {
          it = std::get<T>(f({}).storage);
        }
      },
      storage);
}
// Deep-copies this value. Tensors are cloned into a fresh buffer with the
// given `layout` (empty = default); scalars are copied by value. Cloning
// tuples is unsupported.
InterpreterValue InterpreterValue::Clone(ArrayRef<int64_t> layout) const {
  return std::visit(
      [&](const auto& it) -> InterpreterValue {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          return {it.Clone(layout)};
        } else if constexpr (std::is_same_v<T, Tuple>) {
          llvm_unreachable("cloning tuples is unsupported");
        } else {
          return {it};
        }
      },
      storage);
}
// Returns a value with the requested layout: this value itself (sharing the
// buffer) if the strides already match, otherwise a clone in that layout.
InterpreterValue InterpreterValue::CoerceLayout(
    ArrayRef<int64_t> layout) const {
  const auto& view = this->View();
  if (view.strides == BufferView::GetStridesForLayout(view.sizes, layout)) {
    return *this;
  }
  return Clone(layout);
}
// Returns a fresh, default-initialized tensor of the given `shape` with the
// same element type as this value (scalars yield a tensor of that scalar
// type). Unsupported for tuples.
InterpreterValue InterpreterValue::TypedAlike(
    llvm::ArrayRef<int64_t> shape) const {
  return std::visit(
      [&](const auto& it) -> InterpreterValue {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          return {T::Empty(shape)};
        } else if constexpr (std::is_same_v<T, Tuple>) {
          llvm_unreachable("TypedAlike for tuples is unsupported");
        } else {
          return {TensorOrMemref<T>::Empty(shape)};
        }
      },
      storage);
}
// Creates an empty tensor of `shape` whose elements have `element_type`.
// If `element_type` is itself a vector type, its dims are appended to `shape`
// and recorded as trailing vector dims.
InterpreterValue InterpreterValue::MakeTensor(mlir::Type element_type,
                                              SmallVector<int64_t> shape) {
  auto vector_ty = llvm::dyn_cast<VectorType>(element_type);
  if (vector_ty) {
    llvm::copy(vector_ty.getShape(), std::back_inserter(shape));
  }
  return DispatchScalarType(element_type, [&](auto dummy) -> InterpreterValue {
    auto tensor = TensorOrMemref<decltype(dummy)>::Empty(shape);
    if (vector_ty) {
      tensor.view.num_vector_dims = vector_ty.getRank();
    }
    return {tensor};
  });
}
// Mutable view metadata of the underlying tensor; aborts for non-tensors.
BufferView& InterpreterValue::View() {
  return std::visit(
      [](auto& it) -> BufferView& {
        if constexpr (is_tensor_or_memref_v<decltype(it)>) {
          return it.view;
        }
        llvm_unreachable("view is only supported for tensors");
      },
      storage);
}
// Const overload of View(); aborts for non-tensors.
const BufferView& InterpreterValue::View() const {
  return std::visit(
      [](const auto& it) -> const BufferView& {
        if constexpr (is_tensor_or_memref_v<decltype(it)>) {
          return it.view;
        }
        llvm_unreachable("view is only supported for tensors");
      },
      storage);
}
// True iff the active alternative is a TensorOrMemref.
bool InterpreterValue::IsTensor() const {
  return std::visit(
      [](const auto& it) { return is_tensor_or_memref_v<decltype(it)>; },
      storage);
}
// Wraps this scalar in a rank-0 tensor (optionally flagged as a vector)
// holding a copy of the value.
InterpreterValue InterpreterValue::AsUnitTensor(bool is_vector) const {
  auto result = TypedAlike({});
  result.InsertElement({}, *this);
  result.View().is_vector = is_vector;
  return result;
}
// Tuples compare equal iff they have the same arity and every corresponding
// pair of elements compares equal (comparison dereferences the stored
// pointers).
bool Tuple::operator==(const Tuple& other) const {
  if (values.size() != other.values.size()) return false;
  return llvm::all_of(llvm::zip(values, other.values), [](const auto& pair) {
    const auto& [lhs, rhs] = pair;
    return *lhs == *rhs;
  });
}
// Returns the (shared) backing buffer of the tensor; aborts for non-tensors.
std::shared_ptr<Buffer> InterpreterValue::GetBuffer() const {
  return std::visit(
      [](const auto& it) -> std::shared_ptr<interpreter::Buffer> {
        if constexpr (is_tensor_or_memref_v<decltype(it)>) {
          return it.buffer;
        } else {
          llvm_unreachable("buffer() is only supported for tensors");
        }
      },
      storage);
}
// Sign-extends/widens the stored integral scalar to int64_t; aborts for
// non-integral storage.
int64_t InterpreterValue::AsInt() const {
  auto visit = [](auto value) -> int64_t {
    if constexpr (std::is_integral_v<decltype(value)>) {
      return static_cast<int64_t>(value);
    } else {
      llvm_unreachable("only integral types can be converted to ints");
    }
  };
  return std::visit(visit, storage);
}
// Widens the stored integral scalar to uint64_t. Signed values are first
// reinterpreted in their own width's unsigned type, so e.g. int8_t{-1}
// yields 255 rather than a sign-extended 0xFFFF...FF.
uint64_t InterpreterValue::AsUInt() const {
  auto visit = [](auto value) -> uint64_t {
    if constexpr (std::is_integral_v<decltype(value)>) {
      if constexpr (std::is_signed_v<decltype(value)>) {
        return static_cast<uint64_t>(
            static_cast<std::make_unsigned_t<decltype(value)>>(value));
      } else {
        return static_cast<uint64_t>(value);
      }
    } else {
      llvm_unreachable("only integral types can be converted to ints");
    }
  };
  return std::visit(visit, storage);
}
// Widens the stored floating-point scalar to double; aborts for non-float
// storage.
double InterpreterValue::AsDouble() const {
  // BUG FIX: the lambda previously declared a trailing return type of
  // int64_t, so the static_cast<double> result was truncated back to an
  // integer before being converted to the function's double return value
  // (e.g. 3.5f became 3.0). The lambda must return double.
  auto visit = [](auto value) -> double {
    if constexpr (std::is_floating_point_v<decltype(value)>) {
      return static_cast<double>(value);
    } else {
      llvm_unreachable("only float types can be converted to doubles");
    }
  };
  return std::visit(visit, storage);
}
// Returns sizeof the tensor's element type in bytes; aborts for scalar
// storage.
int64_t InterpreterValue::GetByteSizeOfElement() const {
  return std::visit(
      [](const auto& it) -> int64_t {
        using T = std::decay_t<decltype(it)>;
        if constexpr (is_tensor_or_memref_v<T>) {
          return sizeof(typename T::element_type);
        } else {
          llvm_unreachable("scalars have no element sizes");
        }
      },
      storage);
}
}
} | #include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include <complex>
#include <cstdint>
#include <optional>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/ArrayRef.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
namespace mlir {
namespace interpreter {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
// Fill() on a rank-0 tensor overwrites the single element (and, since the
// tensor shares its buffer with the InterpreterValue, the original sees it).
TEST(InterpreterValueTest, FillUnitTensor) {
  auto t = TensorOrMemref<int64_t>::Empty({});
  t.at({}) = 42;
  InterpreterValue v{t};
  v.Fill([](llvm::ArrayRef<int64_t>) { return InterpreterValue{int64_t{43}}; });
  ASSERT_EQ(t.at({}), 43);
}
// Fill() passes each element's indices to the callback.
TEST(InterpreterValueTest, Fill1DTensor) {
  auto t = TensorOrMemref<int64_t>::Empty({3});
  InterpreterValue v{t};
  v.Fill([](llvm::ArrayRef<int64_t> indices) {
    return InterpreterValue{indices[0]};
  });
  ASSERT_EQ(t.at(0), 0);
  ASSERT_EQ(t.at(1), 1);
  ASSERT_EQ(t.at(2), 2);
}
// For a tensor of vectors, Fill() receives only the tensor indices and the
// callback supplies a whole inner vector per element.
TEST(InterpreterValueTest, FillTensorOfVector) {
  auto t = TensorOrMemref<int64_t>::Empty({4, 2});
  t.view.num_vector_dims = 1;
  InterpreterValue v{t};
  v.Fill([](llvm::ArrayRef<int64_t> indices) -> InterpreterValue {
    EXPECT_EQ(indices.size(), 1);
    auto r = TensorOrMemref<int64_t>::Empty({2});
    r.view.is_vector = true;
    r.at(0) = indices[0];
    r.at(1) = indices[0] * 10;
    return {r};
  });
  ASSERT_EQ(
      v.ToString(),
      "TensorOrMemref<4xvector<2xi64>>: [[0, 0], [1, 10], [2, 20], [3, 30]]");
}
// Fill() never invokes the callback for zero-sized tensors.
TEST(InterpreterValueTest, FillZeroSizedTensor) {
  auto t = TensorOrMemref<int64_t>::Empty({0, 1});
  InterpreterValue v{t};
  bool was_called = false;
  v.Fill([&](llvm::ArrayRef<int64_t> indices) {
    was_called = true;
    return InterpreterValue{indices[0]};
  });
  EXPECT_FALSE(was_called);
}
// TypedAlike() preserves the element type while taking a new shape.
TEST(InterpreterValueTest, TypedAlike) {
  InterpreterValue v{TensorOrMemref<int32_t>::Empty({})};
  auto TypedAlike = v.TypedAlike({1, 2, 3});
  ASSERT_TRUE(
      std::holds_alternative<TensorOrMemref<int32_t>>(TypedAlike.storage));
  ASSERT_THAT(TypedAlike.View().sizes, ElementsAre(1, 2, 3));
}
// AsUnitTensor() wraps a scalar into a rank-0 tensor holding its value.
TEST(InterpreterValueTest, AsUnitTensor) {
  InterpreterValue v{42};
  InterpreterValue wrapped = v.AsUnitTensor();
  ASSERT_THAT(wrapped.View().sizes, IsEmpty());
  ASSERT_EQ(std::get<TensorOrMemref<int32_t>>(wrapped.storage).at({}), 42);
}
// IsTensor() distinguishes tensor storage from scalar storage.
TEST(InterpreterValueTest, IsTensor) {
  ASSERT_FALSE(InterpreterValue{42}.IsTensor());
  ASSERT_TRUE(InterpreterValue{TensorOrMemref<int32_t>::Empty({})}.IsTensor());
}
// AsInt() sign-extends every integral width to int64_t.
TEST(InterpreterValueTest, AsInt) {
  ASSERT_EQ(InterpreterValue{int64_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int32_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int16_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int8_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int8_t{-1}}.AsInt(), -1);
}
// AsUInt() reinterprets signed values in their own width's unsigned type.
TEST(InterpreterValueTest, AsUInt) {
  ASSERT_EQ(InterpreterValue{int16_t{-1}}.AsUInt(), 65535);
  ASSERT_EQ(InterpreterValue{int8_t{-1}}.AsUInt(), 255);
}
// Clone() deep-copies: mutating the original afterwards must not affect
// the clone.
TEST(InterpreterValueTest, CloneTensor) {
  auto tensor = TensorOrMemref<int64_t>::Empty({3});
  tensor.at(0) = 1;
  tensor.at(1) = 2;
  tensor.at(2) = 3;
  InterpreterValue wrapped{tensor};
  auto clone = wrapped.Clone();
  tensor.at(0) = 4;
  auto& cloned_tensor = std::get<TensorOrMemref<int64_t>>(clone.storage);
  ASSERT_EQ(cloned_tensor.at(0), 1);
  ASSERT_EQ(cloned_tensor.at(1), 2);
  ASSERT_EQ(cloned_tensor.at(2), 3);
}
// Clone() with no layout argument normalizes to the default {1, 0} layout.
TEST(InterpreterValueTest, CloneWithLayouts) {
  auto tensor = TensorOrMemref<int64_t>::Empty({3, 5}, {0, 1});
  tensor.at({2, 4}) = 42;
  InterpreterValue wrapped{tensor};
  auto clone = wrapped.Clone();
  ASSERT_EQ(clone.View().strides,
            BufferView::GetStridesForLayout({3, 5}, {1, 0}));
  ASSERT_EQ(clone.ExtractElement({2, 4}).AsInt(), 42);
}
// CoerceLayout() to the layout already in use shares the buffer (no copy).
TEST(InterpreterValueTest, CoerceLayoutNoop) {
  auto tensor = TensorOrMemref<int64_t>::Empty({3, 5}, {0, 1});
  tensor.at({2, 4}) = 42;
  InterpreterValue wrapped{tensor};
  auto coerced = wrapped.CoerceLayout({0, 1});
  ASSERT_EQ(tensor.buffer,
            std::get<TensorOrMemref<int64_t>>(coerced.storage).buffer);
}
// CoerceLayout() to a different layout clones into the requested strides
// while preserving element values.
TEST(InterpreterValueTest, CoerceLayout) {
  auto tensor = TensorOrMemref<int64_t>::Empty({3, 5});
  tensor.at({2, 4}) = 42;
  InterpreterValue wrapped{tensor};
  auto clone = wrapped.CoerceLayout({0, 1});
  ASSERT_EQ(clone.View().strides,
            BufferView::GetStridesForLayout({3, 5}, {0, 1}));
  ASSERT_EQ(clone.ExtractElement({2, 4}).AsInt(), 42);
}
// Coercing a square matrix to column-major physically transposes the
// buffer contents (checked via raw buffer offsets).
TEST(InterpreterValueTest, CoerceLayoutSquare) {
  auto tensor = TensorOrMemref<float>::Empty({2, 2});
  tensor.at({0, 0}) = 1;
  tensor.at({0, 1}) = 2;
  tensor.at({1, 0}) = 3;
  tensor.at({1, 1}) = 4;
  InterpreterValue wrapped{tensor};
  auto clone = wrapped.CoerceLayout({0, 1});
  auto& cloned_tensor = std::get<TensorOrMemref<float>>(clone.storage);
  EXPECT_EQ(
      *reinterpret_cast<float*>(cloned_tensor.buffer->at(0, sizeof(float))), 1);
  EXPECT_EQ(
      *reinterpret_cast<float*>(cloned_tensor.buffer->at(1, sizeof(float))), 3);
  EXPECT_EQ(
      *reinterpret_cast<float*>(cloned_tensor.buffer->at(2, sizeof(float))), 2);
  EXPECT_EQ(
      *reinterpret_cast<float*>(cloned_tensor.buffer->at(3, sizeof(float))), 4);
}
// Clone() on scalar storage simply copies the value.
TEST(InterpreterValueTest, CloneScalar) {
  InterpreterValue value{42};
  auto clone = value.Clone();
  ASSERT_THAT(std::get<int32_t>(clone.storage), 42);
}
// ToString() formats a 1-D tensor as "<type>: [elements]".
TEST(InterpreterValueTest, ToString) {
  InterpreterValue value{TensorOrMemref<int64_t>::Empty({3})};
  ASSERT_EQ(value.ToString(), "TensorOrMemref<3xi64>: [0, 0, 0]");
}
// 2-D tensors print as nested bracketed lists.
TEST(InterpreterValueTest, ToString2d) {
  InterpreterValue value{TensorOrMemref<int64_t>::Empty({3, 2})};
  ASSERT_EQ(value.ToString(),
            "TensorOrMemref<3x2xi64>: [[0, 0], [0, 0], [0, 0]]");
}
// Rank-0 tensors print the bare scalar, no brackets.
TEST(InterpreterValueTest, ToString0d) {
  InterpreterValue value{TensorOrMemref<int64_t>::Empty({})};
  ASSERT_EQ(value.ToString(), "TensorOrMemref<i64>: 0");
}
// Complex scalars print as "a+bi".
TEST(InterpreterValueTest, ToStringComplex) {
  InterpreterValue value{std::complex<float>{}};
  ASSERT_EQ(value.ToString(), "complex<f32>: 0.000000e+00+0.000000e+00i");
}
// Casting unwraps a 1-element tensor to a scalar of the target type; a
// multi-element tensor fails the dynamic cast.
TEST(CastTest, UnpackTensor) {
  InterpreterValue value{TensorOrMemref<int8_t>::Empty({1, 1})};
  value.InsertElement({0, 0}, {int8_t{1}});
  ASSERT_EQ(InterpreterValueCast<int64_t>(value), 1);
  ASSERT_EQ(InterpreterValueCast<uint8_t>(value), 1);
  ASSERT_EQ(InterpreterValueCast<float>(value), 1.0f);
  ASSERT_EQ(InterpreterValueCast<double>(value), 1.0);
  InterpreterValue non_unit{TensorOrMemref<int8_t>::Empty({2, 2})};
  ASSERT_EQ(InterpreterValueDynCast<int64_t>(non_unit), std::nullopt);
}
// Casting to InterpreterValue itself is the identity.
TEST(CastTest, IdentityCast) {
  InterpreterValue value{TensorOrMemref<float>::Empty({1, 1})};
  ASSERT_EQ(InterpreterValueCast<InterpreterValue>(value), value);
}
// Signed-to-unsigned casts wrap modulo the target width.
TEST(CastTest, CastToUnsigned) {
  InterpreterValue value{int8_t{-1}};
  ASSERT_EQ(InterpreterValueCast<uint8_t>(value), 255);
  ASSERT_EQ(InterpreterValueCast<uint16_t>(value), 65535);
}
}
}
} | int64_t InterpreterValue::AsInt() const {
  // Widens whatever integral scalar is stored to int64_t; non-integral
  // storage is a caller bug.
  auto visit = [](auto value) -> int64_t {
    if constexpr (std::is_integral_v<decltype(value)>) {
      return static_cast<int64_t>(value);
    } else {
      llvm_unreachable("only integral types can be converted to ints");
    }
  };
  return std::visit(visit, storage);
} | TEST(InterpreterValueTest, AsInt) {
  // AsInt() sign-extends every integral width to int64_t.
  ASSERT_EQ(InterpreterValue{int64_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int32_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int16_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int8_t{42}}.AsInt(), 42);
  ASSERT_EQ(InterpreterValue{int8_t{-1}}.AsInt(), -1);
} |
#include "arolla/io/input_loader.h"
#include <algorithm>
#include <cstddef>
#include <set>
#include <string>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
// Returns FailedPrecondition listing (sorted, deduplicated) accessor names
// that appear more than once in `output_types`; OkStatus otherwise.
absl::Status ValidateDuplicatedNames(OutputTypesSpan output_types) {
  // Count occurrences; record a name exactly when its count reaches two so
  // each duplicate is reported once.
  absl::flat_hash_map<std::string, size_t> occurrences;
  std::vector<std::string> repeated;
  for (const auto& [name, type] : output_types) {
    size_t& seen = occurrences[name];
    ++seen;
    if (seen == 2) {
      repeated.push_back(name);
    }
  }
  if (repeated.empty()) {
    return absl::OkStatus();
  }
  // Sort for a deterministic error message.
  std::sort(repeated.begin(), repeated.end());
  return absl::FailedPreconditionError(
      absl::StrCat("accessors have duplicated names: ",
                   absl::StrJoin(repeated, ", ")));
}
// Resolves the QType of every name in `names` via `input_loader`. Returns an
// InvalidArgument error listing all unresolvable names (sorted via std::set)
// plus the loader's available names, both truncated to 200 chars.
absl::StatusOr<absl::flat_hash_map<std::string, QTypePtr>> GetInputLoaderQTypes(
    const InputLoaderBase& input_loader, absl::Span<const std::string> names) {
  absl::flat_hash_map<std::string, QTypePtr> types;
  types.reserve(names.size());
  // std::set keeps the unknown names sorted and deduplicated for the message.
  std::set<absl::string_view> unknown_types;
  for (const auto& name : names) {
    if (auto qtype = input_loader.GetQTypeOf(name); qtype != nullptr) {
      types.emplace(name, qtype);
    } else {
      unknown_types.emplace(name);
    }
  }
  if (!unknown_types.empty()) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "unknown inputs: %s (available: %s)",
        Truncate(absl::StrJoin(unknown_types, ", "), 200),
        Truncate(absl::StrJoin(input_loader.SuggestAvailableNames(), ", "),
                 200)));
  }
  return types;
}
// Checks that every slot's type matches the QType this loader reports for
// that name; unknown names surface as an error from GetInputLoaderQTypes.
absl::Status InputLoaderBase::ValidateSlotTypes(
    const absl::flat_hash_map<std::string, TypedSlot>& slots) const {
  std::vector<std::string> names;
  names.reserve(slots.size());
  for (const auto& [name, _] : slots) {
    names.emplace_back(name);
  }
  ASSIGN_OR_RETURN(auto types, GetInputLoaderQTypes(*this, names));
  // NOTE(review): the bool literals are VerifySlotTypes flags — presumably
  // (verify_missing_slots=true, verify_unwanted_slots=false); confirm
  // against the declaration.
  return VerifySlotTypes(types, slots,
                         true,
                         false);
}
// Moves the slots this loader supports (GetQTypeOf != nullptr) out of
// `slots` and returns them; unsupported slots remain in `slots`.
absl::flat_hash_map<std::string, TypedSlot>
InputLoaderBase::ExtractSupportedSlots(
    absl::Nonnull<absl::flat_hash_map<std::string, TypedSlot>*> slots) const {
  absl::flat_hash_map<std::string, TypedSlot> partial_slots;
  for (const auto& [name, slot] : *slots) {
    if (GetQTypeOf(name) == nullptr) {
      continue;
    }
    partial_slots.emplace(name, slot);
  }
  // Erase in a second pass: mutating `*slots` while iterating it would
  // invalidate the loop's iterators.
  for (const auto& [name, _] : partial_slots) {
    slots->erase(name);
  }
  return partial_slots;
}
} | #include "arolla/io/input_loader.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::InputLoaderSupports;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Simple two-field input used by all loader tests below.
struct TestStruct {
  int a;
  double b;
};
// GetInputLoaderQTypes resolves known names to their QTypes and reports all
// unknown names in one InvalidArgument error.
TEST(InputLoaderTest, GetInputLoaderTypes) {
  ASSERT_OK_AND_ASSIGN(auto loader,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; },
                           "b", [](const TestStruct& s) { return s.b; }));
  EXPECT_THAT(GetInputLoaderQTypes(*loader, {}), IsOkAndHolds(IsEmpty()));
  EXPECT_THAT(
      GetInputLoaderQTypes(*loader, {"a"}),
      IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()))));
  EXPECT_THAT(
      GetInputLoaderQTypes(*loader, {"a", "b"}),
      IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()),
                                        Pair("b", GetQType<double>()))));
  EXPECT_THAT(GetInputLoaderQTypes(*loader, {"a", "b", "c"}),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "unknown inputs: c (available: a, b)"));
}
// When two chained loaders both provide "b", the first loader in the chain
// wins (the value 3.5, not 2 * 3.5).
TEST(InputLoaderTest, ChainInputLoaderConflict) {
  ASSERT_OK_AND_ASSIGN(auto loader1,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; },
                           "b", [](const TestStruct& s) { return s.b; }));
  ASSERT_OK_AND_ASSIGN(auto loader2,
                       CreateAccessorsInputLoader<TestStruct>(
                           "b", [](const TestStruct& s) { return 2 * s.b; },
                           "c", [](const TestStruct& s) { return s.b * s.b; }));
  ASSERT_OK_AND_ASSIGN(auto chain_loader,
                       ChainInputLoader<TestStruct>::Build(std::move(loader1),
                                                          std::move(loader2)));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  ASSERT_OK_AND_ASSIGN(
      BoundInputLoader<TestStruct> bound_input_loader,
      chain_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)},
                          {"b", TypedSlot::FromSlot(b_slot)}}));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
}
// A non-owning wrapper delegates GetQTypeOf/Bind to the wrapped loader
// without taking ownership.
TEST(InputLoaderTest, MakeNotOwningInputLoader) {
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<InputLoader<TestStruct>> wrapped_loader,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; }));
  std::unique_ptr<InputLoader<TestStruct>> not_owning_loader =
      MakeNotOwningInputLoader(wrapped_loader.get());
  EXPECT_THAT(not_owning_loader->GetQTypeOf("a"), Eq(GetQType<int32_t>()));
  EXPECT_THAT(not_owning_loader->GetQTypeOf("b"), IsNull());
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  ASSERT_OK_AND_ASSIGN(
      BoundInputLoader<TestStruct> bound_input_loader,
      not_owning_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
// A shared-owning wrapper keeps the wrapped loader alive after the original
// shared_ptr leaves scope.
TEST(InputLoaderTest, MakeSharedOwningInputLoader) {
  std::unique_ptr<InputLoader<TestStruct>> shared_owning_loader;
  {
    ASSERT_OK_AND_ASSIGN(
        std::shared_ptr<const InputLoader<TestStruct>> wrapped_loader,
        CreateAccessorsInputLoader<TestStruct>(
            "a", [](const TestStruct& s) { return s.a; }));
    shared_owning_loader = MakeSharedOwningInputLoader(wrapped_loader);
  }
  EXPECT_THAT(shared_owning_loader->GetQTypeOf("a"), Eq(GetQType<int32_t>()));
  EXPECT_THAT(shared_owning_loader->GetQTypeOf("b"), IsNull());
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  ASSERT_OK_AND_ASSIGN(
      BoundInputLoader<TestStruct> bound_input_loader,
      shared_owning_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
// BindInputLoaderList binds each slot to the first loader that supports it;
// loader 3's competing "b" accessor is ignored in favor of loader 2's.
TEST(InputLoaderTest, BindInputLoaderList) {
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
  ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; }));
  ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                       CreateAccessorsInputLoader<TestStruct>(
                           "b", [](const TestStruct& s) { return s.b; }));
  ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                       CreateAccessorsInputLoader<TestStruct>(
                           "b", [](const TestStruct& s) { return int{0}; },
                           "c", [](const TestStruct& s) { return s.b * s.b; }));
  ASSERT_OK_AND_ASSIGN(
      std::vector<BoundInputLoader<TestStruct>> bound_input_loaders,
      BindInputLoaderList<TestStruct>(input_loaders,
                                      {
                                          {"a", TypedSlot::FromSlot(a_slot)},
                                          {"b", TypedSlot::FromSlot(b_slot)},
                                          {"c", TypedSlot::FromSlot(c_slot)},
                                      }));
  MemoryAllocation alloc(&memory_layout);
  TestStruct input{5, 3.5};
  for (const auto& bound_input_loader : bound_input_loaders) {
    ASSERT_OK(bound_input_loader(input, alloc.frame()));
  }
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
  EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
  EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
// Binding fails with FailedPrecondition when no loader supports one of the
// requested slots ("c" here).
TEST(InputLoaderTest, BindInputLoaderListErrors) {
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
  ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; }));
  ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                       CreateAccessorsInputLoader<TestStruct>(
                           "b", [](const TestStruct& s) { return s.b; }));
  EXPECT_THAT(
      BindInputLoaderList<TestStruct>(input_loaders,
                                      {
                                          {"a", TypedSlot::FromSlot(a_slot)},
                                          {"b", TypedSlot::FromSlot(b_slot)},
                                          {"c", TypedSlot::FromSlot(c_slot)},
                                      }),
      StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("not all")));
}
// A filtering wrapper hides non-allowed names: "b" reports no QType, binding
// it fails, and binding only "a" works.
TEST(InputLoaderTest, FilteringInputLoader) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  ASSERT_OK_AND_ASSIGN(auto inner_loader,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; },
                           "b", [](const TestStruct& s) { return s.b; }));
  EXPECT_THAT(inner_loader->GetQTypeOf("a"), Eq(i32));
  EXPECT_THAT(inner_loader->GetQTypeOf("b"), Eq(f64));
  auto filtered_loader =
      MakeFilteringInputLoader(std::move(inner_loader), {"a"});
  EXPECT_THAT(filtered_loader->GetQTypeOf("a"), Eq(i32));
  EXPECT_THAT(filtered_loader->GetQTypeOf("b"), IsNull());
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  EXPECT_THAT(filtered_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)},
                                     {"b", TypedSlot::FromSlot(b_slot)}}),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "unknown inputs: b (available: a)"));
  ASSERT_OK_AND_ASSIGN(
      BoundInputLoader<TestStruct> bound_input_loader,
      filtered_loader->Bind({{"a", TypedSlot::FromSlot(a_slot)}}));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
// A chain of three disjoint loaders supports the union of their names and
// fills all bound slots.
TEST(InputLoaderTest, ChainInputLoader) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
  {
    ASSERT_OK_AND_ASSIGN(auto loader1,
                         CreateAccessorsInputLoader<TestStruct>(
                             "a", [](const TestStruct& s) { return s.a; }));
    ASSERT_OK_AND_ASSIGN(auto loader2,
                         CreateAccessorsInputLoader<TestStruct>(
                             "b", [](const TestStruct& s) { return s.b; }));
    ASSERT_OK_AND_ASSIGN(
        auto loader3, CreateAccessorsInputLoader<TestStruct>(
                          "c", [](const TestStruct& s) { return s.b * s.b; }));
    ASSERT_OK_AND_ASSIGN(
        chain_input_loader,
        ChainInputLoader<TestStruct>::Build(
            std::move(loader1), std::move(loader2), std::move(loader3)));
  }
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  EXPECT_THAT(*chain_input_loader,
              InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       chain_input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                           {"b", TypedSlot::FromSlot(b_slot)},
                           {"c", TypedSlot::FromSlot(c_slot)},
                       }));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
  EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
  EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
// The RawBufferFactory passed at call time is forwarded to every chained
// loader's accessors (verified by comparing factory pointers).
TEST(InputLoaderTest, ChainInputLoaderFactoryPropagated) {
  auto qbool = GetQType<bool>();
  std::unique_ptr<InputLoader<TestStruct>> input_loader;
  UnsafeArenaBufferFactory global_factory1(1000);
  UnsafeArenaBufferFactory global_factory2(1000);
  {
    ASSERT_OK_AND_ASSIGN(auto loader1, CreateAccessorsInputLoader<TestStruct>(
                                           "a", [&](const TestStruct&,
                                                    RawBufferFactory* factory) {
                                             return factory == &global_factory1;
                                           }));
    ASSERT_OK_AND_ASSIGN(auto loader2, CreateAccessorsInputLoader<TestStruct>(
                                           "b", [&](const TestStruct&,
                                                    RawBufferFactory* factory) {
                                             return factory == &global_factory2;
                                           }));
    ASSERT_OK_AND_ASSIGN(
        input_loader, ChainInputLoader<TestStruct>::Build(std::move(loader1),
                                                          std::move(loader2)));
  }
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<bool>();
  auto b_slot = layout_builder.AddSlot<bool>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  EXPECT_THAT(input_loader, InputLoaderSupports({{"a", qbool}, {"b", qbool}}));
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                           {"b", TypedSlot::FromSlot(b_slot)},
                       }));
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory1));
  EXPECT_TRUE(alloc.frame().Get(a_slot));
  EXPECT_FALSE(alloc.frame().Get(b_slot));
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory2));
  EXPECT_FALSE(alloc.frame().Get(a_slot));
  EXPECT_TRUE(alloc.frame().Get(b_slot));
}
// A custom invoke function supplied to ChainInputLoader::Build is used when
// all three sub-loaders are bound; it observes the full loader list.
TEST(InputLoaderTest, ChainInputLoaderWithCustomInvoke) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  int64_t number_of_loaders = -1;
  {
    std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
    ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                         CreateAccessorsInputLoader<TestStruct>(
                             "a", [](const TestStruct& s) { return s.a; }));
    ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                         CreateAccessorsInputLoader<TestStruct>(
                             "b", [](const TestStruct& s) { return s.b; }));
    ASSERT_OK_AND_ASSIGN(
        input_loaders.emplace_back(),
        CreateAccessorsInputLoader<TestStruct>(
            "c", [](const TestStruct& s) { return s.b * s.b; }));
    ASSERT_OK_AND_ASSIGN(
        chain_input_loader,
        ChainInputLoader<TestStruct>::Build(
            std::move(input_loaders),
            [&number_of_loaders](
                absl::Span<const BoundInputLoader<TestStruct>> loaders,
                const TestStruct& input, FramePtr frame,
                RawBufferFactory* factory) {
              number_of_loaders = loaders.size();
              return ChainInputLoader<TestStruct>::InvokeBoundLoaders(
                  loaders, input, frame, factory);
            }));
    EXPECT_THAT(*chain_input_loader,
                InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
  }
  BoundInputLoader<TestStruct> bound_input_loader(nullptr);
  {
    ASSERT_OK_AND_ASSIGN(bound_input_loader,
                         chain_input_loader->Bind({
                             {"a", TypedSlot::FromSlot(a_slot)},
                             {"b", TypedSlot::FromSlot(b_slot)},
                             {"c", TypedSlot::FromSlot(c_slot)},
                         }));
  }
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  // The custom invoke saw all three bound loaders.
  EXPECT_EQ(number_of_loaders, 3);
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
  EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
  EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
// When only one sub-loader ends up bound, the chain bypasses the custom
// invoke function entirely (number_of_loaders stays -1).
TEST(InputLoaderTest, ChainInputLoaderWithCustomInvokeOptimized) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  std::unique_ptr<InputLoader<TestStruct>> chain_input_loader;
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  FrameLayout memory_layout = std::move(layout_builder).Build();
  int64_t number_of_loaders = -1;
  {
    std::vector<std::unique_ptr<InputLoader<TestStruct>>> input_loaders;
    ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                         CreateAccessorsInputLoader<TestStruct>(
                             "a", [](const TestStruct& s) { return s.a; }));
    ASSERT_OK_AND_ASSIGN(input_loaders.emplace_back(),
                         CreateAccessorsInputLoader<TestStruct>(
                             "b", [](const TestStruct& s) { return s.b; }));
    ASSERT_OK_AND_ASSIGN(
        chain_input_loader,
        ChainInputLoader<TestStruct>::Build(
            std::move(input_loaders),
            [&number_of_loaders](
                absl::Span<const BoundInputLoader<TestStruct>> loaders,
                const TestStruct& input, FramePtr frame,
                RawBufferFactory* factory) {
              number_of_loaders = loaders.size();
              return ChainInputLoader<TestStruct>::InvokeBoundLoaders(
                  loaders, input, frame, factory);
            }));
    EXPECT_THAT(*chain_input_loader,
                InputLoaderSupports({{"a", i32}, {"b", f64}}));
  }
  BoundInputLoader<TestStruct> bound_input_loader(nullptr);
  {
    ASSERT_OK_AND_ASSIGN(bound_input_loader,
                         chain_input_loader->Bind({
                             {"a", TypedSlot::FromSlot(a_slot)},
                         }));
  }
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(number_of_loaders, -1);
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
}
} | absl::StatusOr<absl::flat_hash_map<std::string, QTypePtr>> GetInputLoaderQTypes(
const InputLoaderBase& input_loader, absl::Span<const std::string> names) {
absl::flat_hash_map<std::string, QTypePtr> types;
types.reserve(names.size());
std::set<absl::string_view> unknown_types;
for (const auto& name : names) {
if (auto qtype = input_loader.GetQTypeOf(name); qtype != nullptr) {
types.emplace(name, qtype);
} else {
unknown_types.emplace(name);
}
}
if (!unknown_types.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"unknown inputs: %s (available: %s)",
Truncate(absl::StrJoin(unknown_types, ", "), 200),
Truncate(absl::StrJoin(input_loader.SuggestAvailableNames(), ", "),
200)));
}
return types;
} | TEST(InputLoaderTest, GetInputLoaderTypes) {
ASSERT_OK_AND_ASSIGN(auto loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; }));
EXPECT_THAT(GetInputLoaderQTypes(*loader, {}), IsOkAndHolds(IsEmpty()));
EXPECT_THAT(
GetInputLoaderQTypes(*loader, {"a"}),
IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()))));
EXPECT_THAT(
GetInputLoaderQTypes(*loader, {"a", "b"}),
IsOkAndHolds(UnorderedElementsAre(Pair("a", GetQType<int32_t>()),
Pair("b", GetQType<double>()))));
EXPECT_THAT(GetInputLoaderQTypes(*loader, {"a", "b", "c"}),
StatusIs(absl::StatusCode::kInvalidArgument,
"unknown inputs: c (available: a, b)"));
} |
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include <algorithm>
#include <cstdlib>
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
// EWMA gain for the smoothed RTT (1/8, matching the classic TCP SRTT
// estimator described in RFC 6298).
const float kAlpha = 0.125f;
const float kOneMinusAlpha = (1 - kAlpha);
// EWMA gain for the mean deviation (1/4, matching RFC 6298's RTTVAR).
const float kBeta = 0.25f;
const float kOneMinusBeta = (1 - kBeta);
}
// Initializes every RTT statistic to zero except |initial_rtt_|, which
// starts at the configured default (kInitialRttMs) and is used until real
// samples arrive.
RttStats::RttStats()
    : latest_rtt_(QuicTime::Delta::Zero()),
      min_rtt_(QuicTime::Delta::Zero()),
      smoothed_rtt_(QuicTime::Delta::Zero()),
      previous_srtt_(QuicTime::Delta::Zero()),
      mean_deviation_(QuicTime::Delta::Zero()),
      // Standard-deviation tracking is opt-in; see
      // EnableStandardDeviationCalculation() (exercised by the tests below).
      calculate_standard_deviation_(false),
      initial_rtt_(QuicTime::Delta::FromMilliseconds(kInitialRttMs)),
      last_update_time_(QuicTime::Zero()) {}
void RttStats::ExpireSmoothedMetrics() {
mean_deviation_ = std::max(
mean_deviation_, QuicTime::Delta::FromMicroseconds(std::abs(
(smoothed_rtt_ - latest_rtt_).ToMicroseconds())));
smoothed_rtt_ = std::max(smoothed_rtt_, latest_rtt_);
}
// Incorporates a new RTT sample.
// |send_delta|: time from sending a packet to receiving its ack.
// |ack_delay|: delay the peer reports having spent before acking; it is
//   subtracted from the sample only when doing so cannot push the sample
//   below min_rtt_.
// |now|: current time, recorded as last_update_time_.
// Returns false (and ignores the sample) when send_delta is infinite, zero,
// or negative.
bool RttStats::UpdateRtt(QuicTime::Delta send_delta, QuicTime::Delta ack_delay,
                         QuicTime now) {
  if (send_delta.IsInfinite() || send_delta <= QuicTime::Delta::Zero()) {
    // Fixed log-message typo: "it's is" -> "it is".
    QUIC_LOG_FIRST_N(WARNING, 3)
        << "Ignoring measured send_delta, because it is "
        << "either infinite, zero, or negative.  send_delta = "
        << send_delta.ToMicroseconds();
    return false;
  }
  last_update_time_ = now;
  // min_rtt_ tracks the raw send_delta (no ack_delay correction) so it stays
  // a conservative lower bound on the path RTT.
  if (min_rtt_.IsZero() || min_rtt_ > send_delta) {
    min_rtt_ = send_delta;
  }
  QuicTime::Delta rtt_sample(send_delta);
  previous_srtt_ = smoothed_rtt_;
  // Correct for the peer's reported ack delay, but never let the corrected
  // sample drop below min_rtt_.
  if (rtt_sample > ack_delay) {
    if (rtt_sample - min_rtt_ >= ack_delay) {
      rtt_sample = rtt_sample - ack_delay;
    } else {
      QUIC_CODE_COUNT(quic_ack_delay_makes_rtt_sample_smaller_than_min_rtt);
    }
  } else {
    QUIC_CODE_COUNT(quic_ack_delay_greater_than_rtt_sample);
  }
  latest_rtt_ = rtt_sample;
  if (calculate_standard_deviation_) {
    standard_deviation_calculator_.OnNewRttSample(rtt_sample, smoothed_rtt_);
  }
  if (smoothed_rtt_.IsZero()) {
    // First sample: seed srtt with the sample and, RFC 6298-style, set the
    // initial variation estimate to half the sample.
    smoothed_rtt_ = rtt_sample;
    mean_deviation_ =
        QuicTime::Delta::FromMicroseconds(rtt_sample.ToMicroseconds() / 2);
  } else {
    // EWMA updates: mean deviation uses gain kBeta, srtt uses gain kAlpha.
    mean_deviation_ = QuicTime::Delta::FromMicroseconds(static_cast<int64_t>(
        kOneMinusBeta * mean_deviation_.ToMicroseconds() +
        kBeta * std::abs((smoothed_rtt_ - rtt_sample).ToMicroseconds())));
    smoothed_rtt_ = kOneMinusAlpha * smoothed_rtt_ + kAlpha * rtt_sample;
    QUIC_DVLOG(1) << " smoothed_rtt(us):" << smoothed_rtt_.ToMicroseconds()
                  << " mean_deviation(us):" << mean_deviation_.ToMicroseconds();
  }
  return true;
}
void RttStats::OnConnectionMigration() {
latest_rtt_ = QuicTime::Delta::Zero();
min_rtt_ = QuicTime::Delta::Zero();
smoothed_rtt_ = QuicTime::Delta::Zero();
mean_deviation_ = QuicTime::Delta::Zero();
initial_rtt_ = QuicTime::Delta::FromMilliseconds(kInitialRttMs);
}
// Returns the standard deviation when enough samples have been observed to
// make it valid; otherwise falls back to the mean deviation. Only legal when
// standard-deviation calculation has been enabled.
QuicTime::Delta RttStats::GetStandardOrMeanDeviation() const {
  QUICHE_DCHECK(calculate_standard_deviation_);
  return standard_deviation_calculator_.has_valid_standard_deviation
             ? standard_deviation_calculator_.CalculateStandardDeviation()
             : mean_deviation_;
}
// Folds one RTT sample into the running (exponentially weighted) estimate of
// the squared deviation from the smoothed RTT.
void RttStats::StandardDeviationCalculator::OnNewRttSample(
    QuicTime::Delta rtt_sample, QuicTime::Delta smoothed_rtt) {
  // No variance can be accumulated until a smoothed RTT exists.
  if (smoothed_rtt.IsZero()) {
    return;
  }
  has_valid_standard_deviation = true;
  const double delta = static_cast<double>(rtt_sample.ToMicroseconds()) -
                       static_cast<double>(smoothed_rtt.ToMicroseconds());
  // EWMA of the squared deviation, weighted by kBeta.
  m2 = kOneMinusBeta * m2 + kBeta * delta * delta;
}
// Returns sqrt of the accumulated EWMA of squared deviations; only legal
// once has_valid_standard_deviation is set.
QuicTime::Delta
RttStats::StandardDeviationCalculator::CalculateStandardDeviation() const {
  QUICHE_DCHECK(has_valid_standard_deviation);
  const double std_dev_us = sqrt(m2);
  return QuicTime::Delta::FromMicroseconds(std_dev_us);
}
// Copies every statistic from |stats|, turning this object into an exact
// snapshot of the other tracker.
void RttStats::CloneFrom(const RttStats& stats) {
  initial_rtt_ = stats.initial_rtt_;
  min_rtt_ = stats.min_rtt_;
  latest_rtt_ = stats.latest_rtt_;
  smoothed_rtt_ = stats.smoothed_rtt_;
  previous_srtt_ = stats.previous_srtt_;
  mean_deviation_ = stats.mean_deviation_;
  calculate_standard_deviation_ = stats.calculate_standard_deviation_;
  standard_deviation_calculator_ = stats.standard_deviation_calculator_;
  last_update_time_ = stats.last_update_time_;
}
} | #include "quiche/quic/core/congestion_control/rtt_stats.h"
#include <cmath>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::Message;
namespace quic {
namespace test {
class RttStatsTest : public QuicTest {
protected:
RttStats rtt_stats_;
};
TEST_F(RttStatsTest, DefaultsBeforeUpdate) {
EXPECT_LT(QuicTime::Delta::Zero(), rtt_stats_.initial_rtt());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.min_rtt());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.smoothed_rtt());
}
TEST_F(RttStatsTest, SmoothedRtt) {
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
QuicTime::Delta::FromMilliseconds(100),
QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(400),
QuicTime::Delta::FromMilliseconds(100),
QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(350),
QuicTime::Delta::FromMilliseconds(50), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
QuicTime::Delta::FromMilliseconds(300),
QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(287500),
rtt_stats_.smoothed_rtt());
}
TEST_F(RttStatsTest, SmoothedRttStability) {
for (size_t time = 3; time < 20000; time++) {
RttStats stats;
for (size_t i = 0; i < 100; i++) {
stats.UpdateRtt(QuicTime::Delta::FromMicroseconds(time),
QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
int64_t time_delta_us = stats.smoothed_rtt().ToMicroseconds() - time;
ASSERT_LE(std::abs(time_delta_us), 1);
}
}
}
TEST_F(RttStatsTest, PreviousSmoothedRtt) {
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.previous_srtt());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMicroseconds(187500).ToMicroseconds(),
rtt_stats_.smoothed_rtt().ToMicroseconds());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.previous_srtt());
}
TEST_F(RttStatsTest, MinRtt) {
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(
QuicTime::Delta::FromMilliseconds(10), QuicTime::Delta::Zero(),
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(10));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(
QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(20));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(
QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(30));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(
QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(40));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(
QuicTime::Delta::FromMilliseconds(7),
QuicTime::Delta::FromMilliseconds(2),
QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(50));
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(7), rtt_stats_.min_rtt());
}
TEST_F(RttStatsTest, ExpireSmoothedMetrics) {
QuicTime::Delta initial_rtt = QuicTime::Delta::FromMilliseconds(10);
rtt_stats_.UpdateRtt(initial_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
EXPECT_EQ(0.5 * initial_rtt, rtt_stats_.mean_deviation());
QuicTime::Delta doubled_rtt = 2 * initial_rtt;
rtt_stats_.UpdateRtt(doubled_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(1.125 * initial_rtt, rtt_stats_.smoothed_rtt());
rtt_stats_.ExpireSmoothedMetrics();
EXPECT_EQ(doubled_rtt, rtt_stats_.smoothed_rtt());
EXPECT_EQ(0.875 * initial_rtt, rtt_stats_.mean_deviation());
QuicTime::Delta half_rtt = 0.5 * initial_rtt;
rtt_stats_.UpdateRtt(half_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_GT(doubled_rtt, rtt_stats_.smoothed_rtt());
EXPECT_LT(initial_rtt, rtt_stats_.mean_deviation());
}
TEST_F(RttStatsTest, UpdateRttWithBadSendDeltas) {
QuicTime::Delta initial_rtt = QuicTime::Delta::FromMilliseconds(10);
rtt_stats_.UpdateRtt(initial_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
std::vector<QuicTime::Delta> bad_send_deltas;
bad_send_deltas.push_back(QuicTime::Delta::Zero());
bad_send_deltas.push_back(QuicTime::Delta::Infinite());
bad_send_deltas.push_back(QuicTime::Delta::FromMicroseconds(-1000));
for (QuicTime::Delta bad_send_delta : bad_send_deltas) {
SCOPED_TRACE(Message() << "bad_send_delta = "
<< bad_send_delta.ToMicroseconds());
EXPECT_FALSE(rtt_stats_.UpdateRtt(bad_send_delta, QuicTime::Delta::Zero(),
QuicTime::Zero()));
EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
}
}
TEST_F(RttStatsTest, ResetAfterConnectionMigrations) {
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
QuicTime::Delta::FromMilliseconds(100),
QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
rtt_stats_.OnConnectionMigration();
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.latest_rtt());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.smoothed_rtt());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.min_rtt());
}
TEST_F(RttStatsTest, StandardDeviationCalculatorTest1) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(rtt_stats_.mean_deviation(),
rtt_stats_.GetStandardOrMeanDeviation());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.GetStandardOrMeanDeviation());
}
TEST_F(RttStatsTest, StandardDeviationCalculatorTest2) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(9),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(11),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_LT(QuicTime::Delta::FromMicroseconds(500),
rtt_stats_.GetStandardOrMeanDeviation());
EXPECT_GT(QuicTime::Delta::FromMilliseconds(1),
rtt_stats_.GetStandardOrMeanDeviation());
}
TEST_F(RttStatsTest, StandardDeviationCalculatorTest3) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(50),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(50),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_APPROX_EQ(rtt_stats_.mean_deviation(),
rtt_stats_.GetStandardOrMeanDeviation(), 0.25f);
}
}
} | QuicTime::Delta RttStats::GetStandardOrMeanDeviation() const {
QUICHE_DCHECK(calculate_standard_deviation_);
if (!standard_deviation_calculator_.has_valid_standard_deviation) {
return mean_deviation_;
}
return standard_deviation_calculator_.CalculateStandardDeviation();
} | TEST_F(RttStatsTest, StandardDeviationCalculatorTest1) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(rtt_stats_.mean_deviation(),
rtt_stats_.GetStandardOrMeanDeviation());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.GetStandardOrMeanDeviation());
}
TEST_F(RttStatsTest, StandardDeviationCalculatorTest2) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(10),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(9),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(11),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_LT(QuicTime::Delta::FromMicroseconds(500),
rtt_stats_.GetStandardOrMeanDeviation());
EXPECT_GT(QuicTime::Delta::FromMilliseconds(1),
rtt_stats_.GetStandardOrMeanDeviation());
}
TEST_F(RttStatsTest, StandardDeviationCalculatorTest3) {
rtt_stats_.EnableStandardDeviationCalculation();
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(50),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
QuicTime::Delta::Zero(), QuicTime::Zero());
rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(50),
QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_APPROX_EQ(rtt_stats_.mean_deviation(),
rtt_stats_.GetStandardOrMeanDeviation(), 0.25f);
} |
#include "tensorflow/core/graph/node_builder.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
NodeBuilder::NodeOut::NodeOut(Node* n, int32_t i)
: node(n),
error(false),
name(node != nullptr ? node->name() : (error = true, "")),
index(i),
dt(SafeGetOutput(node, i, &error)) {}
NodeBuilder::NodeOut::NodeOut(OutputTensor t) : NodeOut(t.node, t.index) {}
NodeBuilder::NodeOut::NodeOut(StringPiece n, int32_t i, DataType t)
: node(nullptr), error(false), name(n), index(i), dt(t) {}
NodeBuilder::NodeOut::NodeOut()
: node(nullptr), error(true), index(0), dt(DT_FLOAT) {}
NodeBuilder::NodeBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug)
: def_builder_(name, op_name, op_registry, debug) {}
NodeBuilder::NodeBuilder(StringPiece name, const OpDef* op_def)
: def_builder_(name, op_def) {}
NodeBuilder::NodeBuilder(const NodeDefBuilder& def_builder)
: def_builder_(def_builder) {}
// Adds a data edge from output |src_index| of |src_node|. The edge is always
// recorded in inputs_; the NodeDef input is recorded only when the output
// index is valid (otherwise GetOutputType queues an error for Finalize()).
NodeBuilder& NodeBuilder::Input(Node* src_node, int src_index) {
  inputs_.emplace_back(src_node, src_index);
  DataType output_type;
  const bool index_ok = GetOutputType(src_node, src_index, &output_type);
  if (index_ok) {
    def_builder_.Input(src_node->name(), src_index, output_type);
  }
  return *this;
}
// Adds a data edge described by |src|. A NodeOut built from an invalid
// node/index pair carries an error flag; in that case the error is queued
// (surfacing from Finalize()) and the input is not recorded.
NodeBuilder& NodeBuilder::Input(NodeOut src) {
  if (src.error) {
    AddIndexError(src.node, src.index);
    return *this;
  }
  inputs_.emplace_back(src.node, src.index);
  def_builder_.Input(src.name, src.index, src.dt);
  return *this;
}
// Adds a list input. Valid entries are forwarded to both inputs_ and the
// underlying NodeDefBuilder; invalid entries queue an error (surfacing from
// Finalize()) and are skipped.
NodeBuilder& NodeBuilder::Input(absl::Span<const NodeOut> src_list) {
  std::vector<NodeDefBuilder::NodeOut> def_srcs;
  def_srcs.reserve(src_list.size());
  for (const NodeOut& src : src_list) {
    if (src.error) {
      AddIndexError(src.node, src.index);
      continue;
    }
    def_srcs.emplace_back(src.name, src.index, src.dt);
    inputs_.emplace_back(src.node, src.index);
  }
  def_builder_.Input(absl::Span<const NodeDefBuilder::NodeOut>(def_srcs));
  return *this;
}
NodeBuilder& NodeBuilder::ControlInput(Node* src_node) {
control_inputs_.emplace_back(src_node);
def_builder_.ControlInput(src_node->name());
return *this;
}
// Adds several control dependencies at once; each source node is recorded
// both locally (for edge creation in Finalize()) and in the NodeDef.
NodeBuilder& NodeBuilder::ControlInputs(absl::Span<Node* const> src_nodes) {
  control_inputs_.reserve(control_inputs_.size() + src_nodes.size());
  for (Node* src_node : src_nodes) {
    control_inputs_.push_back(src_node);
    def_builder_.ControlInput(src_node->name());
  }
  return *this;
}
NodeBuilder& NodeBuilder::Device(StringPiece device_spec) {
def_builder_.Device(device_spec);
return *this;
}
NodeBuilder& NodeBuilder::AssignedDevice(StringPiece device) {
assigned_device_ = string(device);
return *this;
}
NodeBuilder& NodeBuilder::XlaCluster(StringPiece xla_cluster) {
def_builder_.Attr("_XlaCluster", xla_cluster);
return *this;
}
absl::StatusOr<Node*> NodeBuilder::Finalize(Graph* graph, bool consume) {
Node* out;
TF_RETURN_IF_ERROR(Finalize(graph, &out, consume));
return out;
}
// Validates the accumulated NodeDef, adds the node to |graph|, and wires up
// all data and control edges. On success, *created_node (when non-null)
// receives the new node. |consume| is forwarded to NodeDefBuilder::Finalize,
// which may then move rather than copy its state.
Status NodeBuilder::Finalize(Graph* graph, Node** created_node, bool consume) {
  // Clear the output early so callers never observe a stale pointer on error.
  if (created_node != nullptr) {
    *created_node = nullptr;
  }
  // Report all errors queued by Input()/NodeOut construction at once.
  if (!errors_.empty()) {
    return errors::InvalidArgument(absl::StrJoin(errors_, "\n"));
  }
  NodeDef node_def;
  TF_RETURN_IF_ERROR(def_builder_.Finalize(&node_def, consume));
  TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, def_builder_.op_def()));
  TF_RETURN_IF_ERROR(
      CheckOpDeprecation(def_builder_.op_def(), graph->versions().producer()));
  TF_ASSIGN_OR_RETURN(Node * node, graph->AddNode(std::move(node_def)));
  node->set_assigned_device_name(assigned_device_);
  // Data edges: input slot i of the new node comes from inputs_[i]. Entries
  // with a null node were specified by name only, so no edge is added.
  for (size_t i = 0; i < inputs_.size(); ++i) {
    if (inputs_[i].node != nullptr) {
      graph->AddEdge(inputs_[i].node, inputs_[i].index, node, i);
    }
  }
  for (Node* control_input : control_inputs_) {
    graph->AddControlEdge(control_input, node);
  }
  if (created_node != nullptr) *created_node = node;
  return absl::OkStatus();
}
// Queues a human-readable error describing an invalid (node, output index)
// pair; queued errors are reported together when Finalize() runs.
void NodeBuilder::AddIndexError(const Node* node, int i) {
  if (node == nullptr) {
    errors_.emplace_back(
        strings::StrCat("Attempt to add nullptr Node to node with type ",
                        def_builder_.op_def().name()));
    return;
  }
  errors_.emplace_back(strings::StrCat(
      "Attempt to add output ", i, " of ", node->name(), " not in range [0, ",
      node->num_outputs(), ") to node with type ",
      def_builder_.op_def().name(), ". Node: ", FormatNodeForError(*node)));
}
// Looks up the type of output |i| of |node| into *dt. Returns true on
// success; on an invalid node/index it queues an error and returns false.
bool NodeBuilder::GetOutputType(const Node* node, int i, DataType* dt) {
  bool bad_index = false;
  *dt = SafeGetOutput(node, i, &bad_index);
  if (bad_index) {
    AddIndexError(node, i);
    return false;
  }
  return true;
}
} | #include "tensorflow/core/graph/node_builder.h"
#include <string>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("Source").Output("o: out_types").Attr("out_types: list(type)");
REGISTER_OP("Sink").Input("i: T").Attr("T: type");
TEST(NodeBuilderTest, Simple) {
Graph graph(OpRegistry::Global());
Node* source_node;
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
EXPECT_FALSE(NodeBuilder("sink3", "Sink")
.Input(source_node, 2)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink4", "Sink")
.Input(source_node, -1)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink5", "Sink")
.Input({source_node, -1})
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink6", "Sink")
.Input(nullptr)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink7", "Sink")
.Input(NodeBuilder::NodeOut(nullptr, 0))
.Finalize(&graph, nullptr)
.ok());
}
REGISTER_OP("FullTypeOpBasicType")
.Output("o1: out_type")
.Attr("out_type: type")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_type");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorBasicType) {
Graph graph(OpRegistry::Global());
Node* node;
TF_EXPECT_OK(NodeBuilder("op", "FullTypeOpBasicType")
.Attr("out_type", DT_FLOAT)
.Finalize(&graph, &node));
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 1);
auto ot = ft.args(0);
ASSERT_EQ(ot.type_id(), TFT_ARRAY);
ASSERT_EQ(ot.args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ot.args(0).args().size(), 0);
}
REGISTER_OP("FullTypeOpListType")
.Output("o1: out_types")
.Attr("out_types: list(type)")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_types");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorListType) {
Graph graph(OpRegistry::Global());
Node* node;
ASSERT_FALSE(NodeBuilder("op", "FullTypeOpListType")
.Attr("out_types", {DT_FLOAT, DT_INT32})
.Finalize(&graph, &node)
.ok());
}
}
} | Status NodeBuilder::Finalize(Graph* graph, Node** created_node, bool consume) {
if (created_node != nullptr) {
*created_node = nullptr;
}
if (!errors_.empty()) {
return errors::InvalidArgument(absl::StrJoin(errors_, "\n"));
}
NodeDef node_def;
TF_RETURN_IF_ERROR(def_builder_.Finalize(&node_def, consume));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, def_builder_.op_def()));
TF_RETURN_IF_ERROR(
CheckOpDeprecation(def_builder_.op_def(), graph->versions().producer()));
TF_ASSIGN_OR_RETURN(Node * node, graph->AddNode(std::move(node_def)));
node->set_assigned_device_name(assigned_device_);
for (size_t i = 0; i < inputs_.size(); ++i) {
if (inputs_[i].node != nullptr) {
graph->AddEdge(inputs_[i].node, inputs_[i].index, node, i);
}
}
for (Node* control_input : control_inputs_) {
graph->AddControlEdge(control_input, node);
}
if (created_node != nullptr) *created_node = node;
return absl::OkStatus();
} | TEST(NodeBuilderTest, Simple) {
Graph graph(OpRegistry::Global());
Node* source_node;
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
EXPECT_FALSE(NodeBuilder("sink3", "Sink")
.Input(source_node, 2)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink4", "Sink")
.Input(source_node, -1)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink5", "Sink")
.Input({source_node, -1})
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink6", "Sink")
.Input(nullptr)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink7", "Sink")
.Input(NodeBuilder::NodeOut(nullptr, 0))
.Finalize(&graph, nullptr)
.ok());
}
TEST(NodeBuilderTest, TypeConstructorBasicType) {
Graph graph(OpRegistry::Global());
Node* node;
TF_EXPECT_OK(NodeBuilder("op", "FullTypeOpBasicType")
.Attr("out_type", DT_FLOAT)
.Finalize(&graph, &node));
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 1);
auto ot = ft.args(0);
ASSERT_EQ(ot.type_id(), TFT_ARRAY);
ASSERT_EQ(ot.args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ot.args(0).args().size(), 0);
}
TEST(NodeBuilderTest, TypeConstructorListType) {
Graph graph(OpRegistry::Global());
Node* node;
ASSERT_FALSE(NodeBuilder("op", "FullTypeOpListType")
.Attr("out_types", {DT_FLOAT, DT_INT32})
.Finalize(&graph, &node)
.ok());
} |
#include "tensorstore/internal/http/http_request.h"
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
namespace tensorstore {
namespace internal_http {
// Renders |byte_range| as an HTTP "Range" header line, or std::nullopt when
// no Range header is needed (full or degenerate/empty range).
std::optional<std::string> FormatRangeHeader(
    OptionalByteRangeRequest byte_range) {
  assert(byte_range.SatisfiesInvariants());
  const auto min = byte_range.inclusive_min;
  const auto max = byte_range.exclusive_max;
  if (byte_range.IsRange() && max > min) {
    // HTTP byte ranges are inclusive on both ends, hence the -1.
    return absl::StrFormat("Range: bytes=%d-%d", min, max - 1);
  }
  if (byte_range.IsSuffix()) {
    // Everything from `min` to the end of the resource.
    return absl::StrFormat("Range: bytes=%d-", min);
  }
  if (byte_range.IsSuffixLength()) {
    // `min` is negative here, producing the "last N bytes" form.
    return absl::StrFormat("Range: bytes=%d", min);
  }
  return std::nullopt;
}
// Renders |max_age| as a "cache-control" header: std::nullopt for an
// unbounded max-age (no constraint), "no-cache" for zero/negative tolerance,
// and "max-age=N" otherwise.
std::optional<std::string> FormatCacheControlMaxAgeHeader(
    absl::Duration max_age) {
  if (max_age >= absl::InfiniteDuration()) {
    return std::nullopt;
  }
  const auto max_age_seconds = absl::ToInt64Seconds(max_age);
  if (max_age_seconds <= 0) {
    // No staleness is tolerated: force revalidation.
    return "cache-control: no-cache";
  }
  return absl::StrFormat("cache-control: max-age=%d", max_age_seconds);
}
// Translates an absolute staleness bound into a relative cache-control
// max-age header: InfinitePast imposes no constraint; InfiniteFuture maps to
// a zero max-age (no-cache); otherwise the allowed age is the elapsed time
// since the bound (clamped at zero).
std::optional<std::string> FormatStalenessBoundCacheControlHeader(
    absl::Time staleness_bound) {
  if (staleness_bound == absl::InfinitePast()) {
    return std::nullopt;
  }
  absl::Duration duration = absl::ZeroDuration();
  if (staleness_bound != absl::InfiniteFuture()) {
    const absl::Time now = absl::Now();
    if (now > staleness_bound) {
      duration = now - staleness_bound;
    }
  }
  return FormatCacheControlMaxAgeHeader(duration);
}
// Starts building a |method| request for |base_url|. |uri_encoder| is used
// to percent-encode query-parameter keys and values added later.
HttpRequestBuilder::HttpRequestBuilder(
    std::string_view method, std::string base_url,
    absl::FunctionRef<std::string(std::string_view)> uri_encoder)
    : uri_encoder_(uri_encoder),
      request_{std::string(method), std::move(base_url)},
      query_parameter_separator_("?") {
  assert(!request_.method.empty());
  // The method must already be upper case (HTTP methods are case-sensitive).
  assert(request_.method ==
         absl::AsciiStrToUpper(std::string_view(request_.method)));
  // If the URL already carries a query string, join further parameters
  // with '&' instead of starting a new query with '?'.
  if (request_.url.find_last_of('?') != std::string::npos) {
    query_parameter_separator_ = "&";
  }
}
HttpRequest HttpRequestBuilder::BuildRequest() { return std::move(request_); }
// Appends |header| to the request. Empty strings are ignored so callers can
// unconditionally forward possibly-empty formatted header values.
HttpRequestBuilder& HttpRequestBuilder::AddHeader(std::string header) {
  if (header.empty()) {
    return *this;
  }
  request_.headers.push_back(std::move(header));
  return *this;
}
// Appends "?key[=value]" (or "&key[=value]" for subsequent parameters) to
// the request URL, percent-encoding both key and value via |uri_encoder_|.
// A non-empty key is required; a valueless parameter omits the '='.
HttpRequestBuilder& HttpRequestBuilder::AddQueryParameter(
    std::string_view key, std::string_view value) {
  assert(!key.empty());
  std::string param =
      absl::StrCat(query_parameter_separator_, uri_encoder_(key));
  if (!value.empty()) {
    absl::StrAppend(&param, "=", uri_encoder_(value));
  }
  absl::StrAppend(&request_.url, param);
  // Any later parameter is joined with '&'.
  query_parameter_separator_ = "&";
  return *this;
}
HttpRequestBuilder& HttpRequestBuilder::EnableAcceptEncoding() {
request_.accept_encoding = true;
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddRangeHeader(
OptionalByteRangeRequest byte_range) {
return AddHeader(FormatRangeHeader(std::move(byte_range)));
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddCacheControlMaxAgeHeader(
absl::Duration max_age) {
return AddHeader(FormatCacheControlMaxAgeHeader(max_age));
}
HttpRequestBuilder&
HttpRequestBuilder::MaybeAddStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
return AddHeader(FormatStalenessBoundCacheControlHeader(staleness_bound));
}
// Adds a "host" header. When |host| is empty, it is derived from the
// authority component of the request URL.
HttpRequestBuilder& HttpRequestBuilder::AddHostHeader(std::string_view host) {
  const std::string_view effective_host =
      host.empty() ? internal::ParseGenericUri(request_.url).authority : host;
  return AddHeader(absl::StrFormat("host: %s", effective_host));
}
}
} | #include "tensorstore/internal/http/http_request.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/byte_range.h"
namespace {
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::testing::AnyOf;
using ::testing::ElementsAre;
TEST(HttpRequestBuilder, BuildRequest) {
auto request = HttpRequestBuilder("GET", "http:
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest();
EXPECT_EQ("http:
EXPECT_TRUE(request.accept_encoding);
EXPECT_EQ("GET", request.method);
EXPECT_THAT(request.headers, testing::ElementsAre("X-foo: bar"));
}
TEST(HttpRequestBuilder, AddCacheControlMaxAgeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::InfiniteDuration());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::ZeroDuration());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: max-age=10"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(-absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
}
TEST(HttpRequestBuilder, AddStalenessBoundCacheControlHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfinitePast());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfiniteFuture());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
const absl::Time kFutureTime = absl::Now() + absl::Minutes(525600);
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(kFutureTime);
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::Now() -
absl::Milliseconds(5900));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre(AnyOf("cache-control: max-age=4",
"cache-control: max-age=5")));
}
}
TEST(HttpRequestBuilder, MaybeAddRangeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::Suffix(1));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=1-"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::SuffixLength(5));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=-5"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest{1, 2});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("Range: bytes=1-1"));
}
}
TEST(HttpRequestBuilder, AddHostHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("host: 127.0.0.1"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader("host.header");
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: host.header"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: localhost:1234"));
}
}
} | HttpRequestBuilder& HttpRequestBuilder::MaybeAddCacheControlMaxAgeHeader(
absl::Duration max_age) {
return AddHeader(FormatCacheControlMaxAgeHeader(max_age));
} | TEST(HttpRequestBuilder, AddCacheControlMaxAgeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::InfiniteDuration());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::ZeroDuration());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: max-age=10"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(-absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
} |
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <stddef.h>
#include <array>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
// Iterator that pulls a block from each of `Arity` input iterators and runs
// an elementwise closure to fill the caller-provided output block.
template <size_t Arity>
class ElementwiseInputTransformNDIterator
    : public NDIterator::Base<ElementwiseInputTransformNDIterator<Arity>> {
 public:
  explicit ElementwiseInputTransformNDIterator(
      span<const NDIterable::Ptr, Arity> inputs,
      ElementwiseClosure<Arity + 1, void*> closure,
      NDIterable::IterationBufferKindLayoutView layout,
      ArenaAllocator<> allocator)
      : inputs_(inputs, layout, allocator),
        context_(closure.context),
        // Selects the function specialization matching the buffer kind once,
        // rather than per block.
        elementwise_function_((*closure.function)[layout.buffer_kind]) {}
  ArenaAllocator<> get_allocator() const override {
    return inputs_.get_allocator();
  }
  // Fills `*pointer` for the block at `indices`. Short-circuits: the
  // elementwise function is only invoked if all inputs produced their blocks.
  // `status` receives the error on failure (also passed through as the
  // closure's opaque void* argument).
  bool GetBlock(span<const Index> indices, IterationBufferShape block_shape,
                IterationBufferPointer* pointer,
                absl::Status* status) override {
    return inputs_.GetBlock(indices, block_shape, status) &&
           InvokeElementwiseFunction<Arity>(
               elementwise_function_, context_, block_shape,
               inputs_.block_pointers(), *pointer, static_cast<void*>(status));
  }
 private:
  NDIteratorsWithManagedBuffers<Arity> inputs_;
  void* context_;
  SpecializedElementwiseFunctionPointer<Arity + 1, void*> elementwise_function_;
};
// Iterable adapting `Arity` input iterables through an elementwise closure
// producing elements of `output_dtype`. The input iterables are stored in
// `Base::iterables` (NDIterablesWithManagedBuffers); a duplicate, never-read
// `inputs_` member has been removed.
template <size_t Arity>
class ElementwiseInputTransformNDIterable
    : public NDIterablesWithManagedBuffers<
          std::array<NDIterable::Ptr, Arity>,
          NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>> {
  using Base = NDIterablesWithManagedBuffers<
      std::array<NDIterable::Ptr, Arity>,
      NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>>;
 public:
  ElementwiseInputTransformNDIterable(
      std::array<NDIterable::Ptr, Arity> input_iterables, DataType output_dtype,
      ElementwiseClosure<Arity + 1, void*> closure, ArenaAllocator<> allocator)
      : Base{std::move(input_iterables)},
        output_dtype_(output_dtype),
        closure_(closure),
        allocator_(allocator) {}
  ArenaAllocator<> get_allocator() const override { return allocator_; }
  // Elements produced by this iterable have the transform's output type.
  DataType dtype() const override { return output_dtype_; }
  NDIterator::Ptr GetIterator(
      NDIterable::IterationBufferKindLayoutView layout) const override {
    return MakeUniqueWithVirtualIntrusiveAllocator<
        ElementwiseInputTransformNDIterator<Arity>>(allocator_, this->iterables,
                                                    closure_, layout);
  }
 private:
  DataType output_dtype_;
  ElementwiseClosure<Arity + 1, void*> closure_;
  ArenaAllocator<> allocator_;
};
}
// Returns an iterable that applies `closure` elementwise to `Arity - 1`
// inputs, yielding elements of `output_dtype`. Allocations are arena-scoped.
template <size_t Arity>
NDIterable::Ptr GetElementwiseInputTransformNDIterable(
    std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype,
    ElementwiseClosure<Arity, void*> closure, Arena* arena) {
  return MakeUniqueWithVirtualIntrusiveAllocator<
      ElementwiseInputTransformNDIterable<Arity - 1>>(
      ArenaAllocator<>(arena), std::move(inputs), output_dtype, closure);
}
// Explicit instantiations for the supported arities (closure arity =
// number of inputs + 1 output).
// NOTE: the macro body must not end with a line continuation; a stray
// trailing backslash here previously spliced the first invocation into the
// macro body, breaking every expansion.
#define TENSORSTORE_INTERNAL_DO_INSTANTIATE(Arity)                        \
  template NDIterable::Ptr GetElementwiseInputTransformNDIterable<Arity>( \
      std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype, \
      ElementwiseClosure<Arity, void*> closure, Arena * arena);
TENSORSTORE_INTERNAL_DO_INSTANTIATE(1)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(2)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(3)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(4)
#undef TENSORSTORE_INTERNAL_DO_INSTANTIATE
}
} | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
template <typename Func, typename DestArray, typename... SourceArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
DestArray dest_array, SourceArray... source_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<sizeof...(SourceArray) + 1, void*>
closure = tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element..., typename DestArray::Element),
void*>::Closure(&func);
auto iterable = tensorstore::internal::GetElementwiseInputTransformNDIterable(
{{tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value()...}},
tensorstore::dtype_v<typename DestArray::Element>, closure, &arena);
return NDIterableCopier(*iterable,
*tensorstore::internal::GetTransformedArrayNDIterable(
dest_array, &arena)
.value(),
dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseInputTransformTest, Nullary) {
auto dest = tensorstore::AllocateArray<double>({2, 3});
TENSORSTORE_EXPECT_OK(TestCopy([](double* dest, void* arg) { *dest = 42.0; },
{}, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{42.0, 42.0, 42.0}, {42.0, 42.0, 42.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Unary) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* arg) { *dest = -*source; },
{}, dest, source));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Binary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(TestCopy([](const int* a, const int* b, double* dest,
void* arg) { *dest = 2.0 * *a + *b; },
{}, dest, a, b));
EXPECT_EQ(
tensorstore::MakeArray<double>({{12.0, 16.0, 20.0}, {24.0, 28.0, 32.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Ternary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto c = tensorstore::MakeArray<double>({{1, -1, 1}, {-1, -1, 1}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(
TestCopy([](const int* a, const int* b, const double* c, double* dest,
void* arg) { *dest = *a + *b * *c; },
{}, dest, a, b, c));
EXPECT_EQ(
tensorstore::MakeArray<double>({{1 + 10 * 1, 2 + 12 * -1, 3 + 14 * 1},
{4 + 16 * -1, 5 + 18 * -1, 6 + 20 * 1}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, dest, source),
MatchesStatus(absl::StatusCode::kUnknown, "zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} | DataType dtype() const override { return output_dtype_; } | TEST(NDIterableElementwiseInputTransformTest, Nullary) {
auto dest = tensorstore::AllocateArray<double>({2, 3});
TENSORSTORE_EXPECT_OK(TestCopy([](double* dest, void* arg) { *dest = 42.0; },
{}, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{42.0, 42.0, 42.0}, {42.0, 42.0, 42.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Unary) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* arg) { *dest = -*source; },
{}, dest, source));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Binary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(TestCopy([](const int* a, const int* b, double* dest,
void* arg) { *dest = 2.0 * *a + *b; },
{}, dest, a, b));
EXPECT_EQ(
tensorstore::MakeArray<double>({{12.0, 16.0, 20.0}, {24.0, 28.0, 32.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Ternary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto c = tensorstore::MakeArray<double>({{1, -1, 1}, {-1, -1, 1}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(
TestCopy([](const int* a, const int* b, const double* c, double* dest,
void* arg) { *dest = *a + *b * *c; },
{}, dest, a, b, c));
EXPECT_EQ(
tensorstore::MakeArray<double>({{1 + 10 * 1, 2 + 12 * -1, 3 + 14 * 1},
{4 + 16 * -1, 5 + 18 * -1, 6 + 20 * 1}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, dest, source),
MatchesStatus(absl::StatusCode::kUnknown, "zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
} |
#include "arolla/qexpr/operator_metadata.h"
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/indestructible.h"
namespace arolla {
absl::Status QExprOperatorMetadataRegistry::AddOperatorFamilyMetadata(
    QExprOperatorFamilyMetadata metadata) {
  absl::WriterMutexLock lock(&mutex_);
  // A name may be registered at most once, as either a family or a set of
  // individual operators — never both.
  const bool name_taken = family_metadatas_.contains(metadata.name) ||
                          operator_metadatas_.contains(metadata.name);
  if (name_taken) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register individual operator or operator "
                        "family metadata twice under the same name %s",
                        metadata.name));
  }
  family_metadatas_.emplace(metadata.name, std::move(metadata));
  return absl::OkStatus();
}
// Registers metadata for a single (name, input types) operator instance.
// Fails with kAlreadyExists if the name is taken by a family registration or
// if this exact input-type signature was already registered.
absl::Status QExprOperatorMetadataRegistry::AddOperatorMetadata(
    QExprOperatorMetadata metadata) {
  absl::WriterMutexLock lock(&mutex_);
  if (family_metadatas_.contains(metadata.name)) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register individual operator or operator "
                        "family metadata twice under the same name %s",
                        metadata.name));
  }
  // try_emplace default-constructs the per-name bucket only when absent,
  // avoiding the temporary TypeToMetadata{} the previous emplace built.
  auto iter = operator_metadatas_.try_emplace(metadata.name).first;
  if (iter->second.contains(metadata.input_qtypes)) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register operator metadata twice "
                        "for operator %s with input types %s",
                        metadata.name,
                        FormatTypeVector(metadata.input_qtypes)));
  }
  iter->second.emplace(metadata.input_qtypes, std::move(metadata));
  return absl::OkStatus();
}
// Looks up metadata for `op_name` applied to `input_qtypes`. A family
// registration takes precedence: its build details are combined with the
// requested input types to synthesize per-operator metadata. Otherwise an
// exact (name, input types) registration is required; returns kNotFound if
// neither exists.
absl::StatusOr<QExprOperatorMetadata>
QExprOperatorMetadataRegistry::LookupOperatorMetadata(
    absl::string_view op_name, absl::Span<const QTypePtr> input_qtypes) const {
  absl::ReaderMutexLock lock(&mutex_);
  // Materialized once: used as the lookup key and, on the family path,
  // moved into the returned metadata.
  std::vector<QTypePtr> input_qtypes_vector(input_qtypes.begin(),
                                            input_qtypes.end());
  if (auto m = family_metadatas_.find(op_name); m != family_metadatas_.end()) {
    return QExprOperatorMetadata{
        .name = std::string(m->second.name),
        .input_qtypes = std::move(input_qtypes_vector),
        .build_details = m->second.family_build_details};
  }
  if (auto oms = operator_metadatas_.find(op_name);
      oms != operator_metadatas_.end()) {
    if (auto m = oms->second.find(input_qtypes_vector);
        m != oms->second.end()) {
      return m->second;
    }
  }
  return absl::Status(
      absl::StatusCode::kNotFound,
      absl::StrFormat(
          "no metadata is available for operator %s with input types %s",
          op_name, FormatTypeVector(input_qtypes)));
}
// Returns the process-wide registry singleton. Indestructible keeps the
// instance alive (no destructor at exit), so registrations remain usable
// during static teardown.
QExprOperatorMetadataRegistry& QExprOperatorMetadataRegistry::GetInstance() {
  static Indestructible<QExprOperatorMetadataRegistry> instance;
  return *instance;
}
// Maps each registered operator signature to the set of build targets that
// provide it.
absl::flat_hash_map<std::string, std::set<std::string>>
QExprOperatorMetadataRegistry::OperatorBuildDependencies() const {
  absl::flat_hash_map<std::string, std::set<std::string>> deps;
  absl::ReaderMutexLock lock(&mutex_);
  // Family registrations are keyed with a "(...)" signature placeholder.
  for (const auto& [unused_key, family] : family_metadatas_) {
    deps[absl::StrCat(family.name, "(...)")].insert(
        family.family_build_details.build_target);
  }
  // Individual operators get one entry per registered input signature.
  for (const auto& [op_name, by_types] : operator_metadatas_) {
    for (const auto& [input_types, metadata] : by_types) {
      deps[absl::StrCat(op_name, ::arolla::FormatTypeVector(input_types))]
          .insert(metadata.build_details.build_target);
    }
  }
  return deps;
}
// Registers family metadata in the global registry, crashing the process on
// conflict. The int return value (57 is arbitrary) exists so the call can
// initialize a global, running it at static-initialization time.
int RegisterOperatorFamilyMetadataOrDie(QExprOperatorFamilyMetadata metadata) {
  auto status =
      QExprOperatorMetadataRegistry::GetInstance().AddOperatorFamilyMetadata(
          std::move(metadata));
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
  return 57;
}
// Registers single-operator metadata in the global registry, crashing the
// process on conflict. The int return value (57 is arbitrary) exists so the
// call can initialize a global, running it at static-initialization time.
int RegisterOperatorMetadataOrDie(QExprOperatorMetadata metadata) {
  auto status =
      QExprOperatorMetadataRegistry::GetInstance().AddOperatorMetadata(
          std::move(metadata));
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
  return 57;
}
} | #include "arolla/qexpr/operator_metadata.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/operator_name.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::Eq;
using ::testing::Field;
using ::testing::MatchesRegex;
TEST(OperatorMetadataTest, OperatorMetadata) {
auto i32 = GetQType<int32_t>();
auto f32 = GetQType<float>();
QExprOperatorMetadata add_ints_meta;
add_ints_meta.name = AROLLA_OPERATOR_NAME("test.add");
add_ints_meta.input_qtypes = {i32, i32};
add_ints_meta.build_details.op_class = "Add<int>";
QExprOperatorMetadata add_floats_meta;
add_floats_meta.name = AROLLA_OPERATOR_NAME("test.add");
add_floats_meta.input_qtypes = {f32, f32};
add_floats_meta.build_details.op_class = "Add<float>";
QExprOperatorMetadataRegistry registry;
ASSERT_OK(registry.AddOperatorMetadata(add_ints_meta));
ASSERT_OK(registry.AddOperatorMetadata(add_floats_meta));
EXPECT_THAT(
registry.AddOperatorMetadata(add_ints_meta),
StatusIs(absl::StatusCode::kAlreadyExists,
MatchesRegex("trying to register operator metadata twice for "
"operator test.add with input types .*")));
EXPECT_THAT(
registry.AddOperatorFamilyMetadata(QExprOperatorFamilyMetadata{
.name = add_ints_meta.name, .family_build_details = {}}),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(registry.LookupOperatorMetadata(add_ints_meta.name, {i32, i32}),
IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
Field(&BuildDetails::op_class, "Add<int>"))));
}
TEST(OperatorMetadataTest, OperatorFamilyMetadata) {
auto i32 = GetQType<int32_t>();
::arolla::BuildDetails family_build_details;
family_build_details.op_family_class = "AddFamily";
QExprOperatorFamilyMetadata add_meta{
.name = "test.add", .family_build_details = family_build_details};
QExprOperatorMetadataRegistry registry;
ASSERT_OK(registry.AddOperatorFamilyMetadata(add_meta));
EXPECT_THAT(
registry.AddOperatorMetadata(QExprOperatorMetadata{
.name = "test.add", .input_qtypes = {i32, i32}}),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(
registry.AddOperatorFamilyMetadata(add_meta),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(
registry.LookupOperatorMetadata(add_meta.name, {i32, i32}),
IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
Field(&BuildDetails::op_family_class, "AddFamily"))));
}
}
} | absl::StatusOr<QExprOperatorMetadata>
QExprOperatorMetadataRegistry::LookupOperatorMetadata(
absl::string_view op_name, absl::Span<const QTypePtr> input_qtypes) const {
absl::ReaderMutexLock lock(&mutex_);
std::vector<QTypePtr> input_qtypes_vector(input_qtypes.begin(),
input_qtypes.end());
if (auto m = family_metadatas_.find(op_name); m != family_metadatas_.end()) {
return QExprOperatorMetadata{
.name = std::string(m->second.name),
.input_qtypes = std::move(input_qtypes_vector),
.build_details = m->second.family_build_details};
}
if (auto oms = operator_metadatas_.find(op_name);
oms != operator_metadatas_.end()) {
if (auto m = oms->second.find(input_qtypes_vector);
m != oms->second.end()) {
return m->second;
}
}
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrFormat(
"no metadata is available for operator %s with input types %s",
op_name, FormatTypeVector(input_qtypes)));
} | TEST(OperatorMetadataTest, OperatorMetadata) {
auto i32 = GetQType<int32_t>();
auto f32 = GetQType<float>();
QExprOperatorMetadata add_ints_meta;
add_ints_meta.name = AROLLA_OPERATOR_NAME("test.add");
add_ints_meta.input_qtypes = {i32, i32};
add_ints_meta.build_details.op_class = "Add<int>";
QExprOperatorMetadata add_floats_meta;
add_floats_meta.name = AROLLA_OPERATOR_NAME("test.add");
add_floats_meta.input_qtypes = {f32, f32};
add_floats_meta.build_details.op_class = "Add<float>";
QExprOperatorMetadataRegistry registry;
ASSERT_OK(registry.AddOperatorMetadata(add_ints_meta));
ASSERT_OK(registry.AddOperatorMetadata(add_floats_meta));
EXPECT_THAT(
registry.AddOperatorMetadata(add_ints_meta),
StatusIs(absl::StatusCode::kAlreadyExists,
MatchesRegex("trying to register operator metadata twice for "
"operator test.add with input types .*")));
EXPECT_THAT(
registry.AddOperatorFamilyMetadata(QExprOperatorFamilyMetadata{
.name = add_ints_meta.name, .family_build_details = {}}),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(registry.LookupOperatorMetadata(add_ints_meta.name, {i32, i32}),
IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
Field(&BuildDetails::op_class, "Add<int>"))));
}
TEST(OperatorMetadataTest, OperatorFamilyMetadata) {
auto i32 = GetQType<int32_t>();
::arolla::BuildDetails family_build_details;
family_build_details.op_family_class = "AddFamily";
QExprOperatorFamilyMetadata add_meta{
.name = "test.add", .family_build_details = family_build_details};
QExprOperatorMetadataRegistry registry;
ASSERT_OK(registry.AddOperatorFamilyMetadata(add_meta));
EXPECT_THAT(
registry.AddOperatorMetadata(QExprOperatorMetadata{
.name = "test.add", .input_qtypes = {i32, i32}}),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(
registry.AddOperatorFamilyMetadata(add_meta),
StatusIs(absl::StatusCode::kAlreadyExists,
Eq("trying to register individual operator or operator family "
"metadata twice under the same name test.add")));
EXPECT_THAT(
registry.LookupOperatorMetadata(add_meta.name, {i32, i32}),
IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
Field(&BuildDetails::op_family_class, "AddFamily"))));
} |
#include "tensorflow/lite/tools/model_loader.h"
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace tools {
bool ModelLoader::Init() {
  // Idempotent: a previously successful Init() short-circuits.
  if (model_ && model_->initialized()) {
    return true;
  }
  // Delegate to the subclass hook, then confirm it actually produced an
  // initialized model (InitInternal may "succeed" while leaving model_ null).
  return InitInternal() && model_ && model_->initialized();
}
// Builds the model from a file path. Returns false only on an empty path;
// a verification failure leaves model_ null, which ModelLoader::Init()
// detects afterwards.
bool PathModelLoader::InitInternal() {
  if (model_path_.empty()) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "model_path is empty.");
    return false;
  }
  model_ = FlatBufferModel::VerifyAndBuildFromFile(model_path_.c_str());
  return true;
}
// Builds the model from a caller-owned buffer. The caller must keep the
// buffer alive for the lifetime of the model. A verification failure leaves
// model_ null, which ModelLoader::Init() detects afterwards.
bool BufferModelLoader::InitInternal() {
  if (!caller_owned_buffer_ || model_size_ <= 0) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                    "Failed to create BufferModelLoader: caller_owned_buffer "
                    "is %s; model_size: %zu",
                    caller_owned_buffer_ ? "not null" : "null", model_size_);
    return false;
  }
  model_ = FlatBufferModel::VerifyAndBuildFromBuffer(caller_owned_buffer_,
                                                     model_size_);
  return true;
}
#ifndef _WIN32
// Builds the model by mmap-ing [model_offset_, model_offset_ + model_size_)
// of the file descriptor. Not compiled on Windows (enclosing #ifndef _WIN32).
bool MmapModelLoader::InitInternal() {
  // NOTE(review): the %zu format suggests model_offset_/model_size_ are
  // size_t, which would make the `< 0` comparisons vacuous — confirm the
  // member types and tighten the validation if so.
  if (model_fd_ < 0 || model_offset_ < 0 || model_size_ < 0) {
    TFLITE_LOG_PROD(
        TFLITE_LOG_ERROR,
        "Invalid model file descriptor. file descriptor: %d model_offset: "
        "%zu model_size: %zu",
        model_fd_, model_offset_, model_size_);
    return false;
  }
  if (!MMAPAllocation::IsSupported()) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not supported.");
    return false;
  }
  auto allocation = std::make_unique<MMAPAllocation>(
      model_fd_, model_offset_, model_size_, tflite::DefaultErrorReporter());
  if (!allocation->valid()) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "MMAPAllocation is not valid.");
    return false;
  }
  model_ = FlatBufferModel::VerifyAndBuildFromAllocation(std::move(allocation));
#if FLATBUFFERS_LITTLEENDIAN == 0
  // Big-endian hosts need the flatbuffer byte-swapped after loading.
  model_ = FlatBufferModel::ByteConvertModel(std::move(model_));
#endif
  return true;
}
// Reads model_size_ bytes of flatbuffer data from the pipe into an owned
// malloc'd buffer, then verifies and builds the model. Closes pipe_fd_
// whether or not the read succeeds. A verification failure leaves model_
// null, which ModelLoader::Init() detects afterwards.
bool PipeModelLoader::InitInternal() {
  if (pipe_fd_ < 0) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Invalid pipe file descriptor %d",
                    pipe_fd_);
    return false;
  }
  // Release any buffer left over from a previous attempt before allocating.
  std::free(model_buffer_);
  model_buffer_ = reinterpret_cast<uint8_t*>(std::malloc(model_size_));
  // Previously the malloc result was passed to read() unchecked; an
  // allocation failure would have dereferenced nullptr.
  if (model_buffer_ == nullptr) {
    TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                    "Failed to allocate %zu bytes for the model buffer.",
                    model_size_);
    close(pipe_fd_);
    return false;
  }
  // Drain the pipe until model_size_ bytes arrive, or read() fails / EOFs.
  int read_bytes = 0;
  int remaining_bytes = model_size_;
  uint8_t* buffer = model_buffer_;
  while (remaining_bytes > 0 &&
         (read_bytes = read(pipe_fd_, buffer, remaining_bytes)) > 0) {
    remaining_bytes -= read_bytes;
    buffer += read_bytes;
  }
  close(pipe_fd_);
  if (read_bytes < 0 || remaining_bytes != 0) {
    TFLITE_LOG_PROD(
        TFLITE_LOG_ERROR,
        "Read Model from pipe failed: %s. Expect to read %zu bytes, "
        "%d bytes missing.",
        std::strerror(errno), model_size_, remaining_bytes);
    return false;
  }
  model_ = FlatBufferModel::VerifyAndBuildFromBuffer(
      reinterpret_cast<const char*>(model_buffer_), model_size_);
  return true;
}
#endif
// Creates a ModelLoader from an encoded path string:
//   "fd:<fd>:<offset>:<size>"        -> MmapModelLoader   (non-Windows)
//   "pipe:<read_fd>:<write_fd>:<size>" -> PipeModelLoader (non-Windows;
//       the unused write end is closed here when non-negative)
//   "buffer:<pointer-as-int>:<size>" -> BufferModelLoader
//   anything else                    -> PathModelLoader on the raw path
// Returns nullptr when a recognized prefix has a malformed argument list.
std::unique_ptr<ModelLoader> CreateModelLoaderFromPath(
    const std::string& path) {
  std::vector<absl::string_view> parts = absl::StrSplit(path, ':');
  if (parts.empty()) {
    return nullptr;
  }
#ifndef _WIN32
  if (parts[0] == "fd") {
    int model_fd;
    size_t model_offset, model_size;
    if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &model_fd) ||
        !absl::SimpleAtoi(parts[2], &model_offset) ||
        !absl::SimpleAtoi(parts[3], &model_size)) {
      TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
                      path.c_str());
      return nullptr;
    }
    return std::make_unique<MmapModelLoader>(model_fd, model_offset,
                                             model_size);
  }
  if (parts[0] == "pipe") {
    int read_fd, write_fd;
    size_t model_size;
    if (parts.size() != 4 || !absl::SimpleAtoi(parts[1], &read_fd) ||
        !absl::SimpleAtoi(parts[2], &write_fd) ||
        !absl::SimpleAtoi(parts[3], &model_size)) {
      TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
                      path.c_str());
      return nullptr;
    }
    // This process only reads; close the writer end if the caller passed one.
    if (write_fd >= 0) {
      close(write_fd);
    }
    return std::make_unique<PipeModelLoader>(read_fd, model_size);
  }
#endif
  if (parts[0] == "buffer") {
    int64_t buffer_handle;
    size_t model_size;
    if (parts.size() != 3 || !absl::SimpleAtoi(parts[1], &buffer_handle) ||
        !absl::SimpleAtoi(parts[2], &model_size)) {
      TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to parse model path: %s",
                      path.c_str());
      return nullptr;
    }
    // The "handle" is a raw pointer value serialized as an integer; the
    // referenced buffer must outlive the loader (caller-owned).
    return std::make_unique<BufferModelLoader>(
        reinterpret_cast<const char*>(buffer_handle), model_size);
  }
  return std::make_unique<PathModelLoader>(path);
}
}
} | #include "tensorflow/lite/tools/model_loader.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace tools {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
using ::testing::IsNull;
using ::testing::Not;
using ::testing::WhenDynamicCastTo;
class ModelLoaderTest : public ::testing::Test {};
TEST_F(ModelLoaderTest, CreateFromModelPath) {
auto model_loader = std::make_unique<PathModelLoader>(kModelPath);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromFdPath) {
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
auto model_loader =
std::make_unique<MmapModelLoader>(fd, 0, stat_buf.st_size);
close(fd);
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
}
TEST_F(ModelLoaderTest, CreateFromPipePath) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
int pipe_fds[2];
ASSERT_EQ(pipe(pipe_fds), 0);
pid_t r = fork();
if (r == 0) {
close(pipe_fds[0]);
int written_bytes = 0;
int remaining_bytes = fbb.GetSize();
uint8_t* buffer = fbb.GetBufferPointer();
while (remaining_bytes > 0 &&
(written_bytes = write(pipe_fds[1], buffer, remaining_bytes)) > 0) {
remaining_bytes -= written_bytes;
buffer += written_bytes;
}
close(pipe_fds[1]);
ASSERT_TRUE(written_bytes > 0 && remaining_bytes == 0);
_exit(0);
}
close(pipe_fds[1]);
auto model_loader =
std::make_unique<PipeModelLoader>(pipe_fds[0], fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
}
// A BufferModelLoader built directly from a serialized flatbuffer in memory
// should initialize and expose the same model contents.
TEST_F(ModelLoaderTest, CreateBufferModelLoader) {
  auto model = FlatBufferModel::BuildFromFile(kModelPath);
  flatbuffers::FlatBufferBuilder fbb;
  ModelT model_obj;
  model->GetModel()->UnPackTo(&model_obj);
  std::string model_description = model_obj.description;
  FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
  ASSERT_NE(model->allocation(), nullptr);
  // The loader borrows fbb's buffer; fbb outlives the loader in this scope.
  auto model_loader = std::make_unique<BufferModelLoader>(
      reinterpret_cast<const char*>(fbb.GetBufferPointer()), fbb.GetSize());
  ASSERT_NE(model_loader, nullptr);
  EXPECT_TRUE(model_loader->Init());
  EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
            model_description);
}
// Init() should fail (not crash) for a path that does not exist.
TEST_F(ModelLoaderTest, InvalidModelPath) {
  auto model_loader = std::make_unique<PathModelLoader>("invalid/path");
  ASSERT_NE(model_loader, nullptr);
  EXPECT_FALSE(model_loader->Init());
}
// Init() should fail for an fd/offset/size triple that cannot be mmapped.
TEST_F(ModelLoaderTest, InvalidFd) {
  auto model_loader = std::make_unique<MmapModelLoader>(0, 5, 10);
  ASSERT_NE(model_loader, nullptr);
  EXPECT_FALSE(model_loader->Init());
}
// Init() should fail for an invalid (negative) pipe descriptor.
TEST_F(ModelLoaderTest, InvalidPipe) {
  auto model_loader = std::make_unique<PipeModelLoader>(-1, 10);
  ASSERT_NE(model_loader, nullptr);
  EXPECT_FALSE(model_loader->Init());
}
// CreateModelLoaderFromPath dispatches on the path prefix: plain paths give a
// PathModelLoader, "fd:..." an MmapModelLoader, "pipe:..." a PipeModelLoader,
// and "buffer:..." a BufferModelLoader.
TEST_F(ModelLoaderTest, CreateModelLoaderFromValidPath) {
  EXPECT_THAT(CreateModelLoaderFromPath("a/b/c").get(),
              WhenDynamicCastTo<PathModelLoader*>(Not(IsNull())));
  EXPECT_THAT(CreateModelLoaderFromPath("fd:1:2:3").get(),
              WhenDynamicCastTo<MmapModelLoader*>(Not(IsNull())));
  EXPECT_THAT(CreateModelLoaderFromPath("pipe:1:2:3").get(),
              WhenDynamicCastTo<PipeModelLoader*>(Not(IsNull())));
  EXPECT_THAT(CreateModelLoaderFromPath("buffer:1:2").get(),
              WhenDynamicCastTo<BufferModelLoader*>(Not(IsNull())));
}
// Paths with the wrong number of ':'-separated fields must be rejected.
TEST_F(ModelLoaderTest, CreateModelLoaderFromInvalidPath) {
  EXPECT_EQ(CreateModelLoaderFromPath("fd:1"), nullptr);
  EXPECT_EQ(CreateModelLoaderFromPath("fd:1:2:3:4"), nullptr);
  EXPECT_EQ(CreateModelLoaderFromPath("pipe:1"), nullptr);
  EXPECT_EQ(CreateModelLoaderFromPath("pipe:1:2:3:4"), nullptr);
  EXPECT_EQ(CreateModelLoaderFromPath("buffer:1:2:3"), nullptr);
}
}
}
} | bool BufferModelLoader::InitInternal() {
if (!caller_owned_buffer_ || model_size_ <= 0) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to create BufferModelLoader: caller_owned_buffer "
"is %s; model_size: %zu",
caller_owned_buffer_ ? "not null" : "null", model_size_);
return false;
}
model_ = FlatBufferModel::VerifyAndBuildFromBuffer(caller_owned_buffer_,
model_size_);
return true;
} | TEST_F(ModelLoaderTest, CreateBufferModelLoader) {
auto model = FlatBufferModel::BuildFromFile(kModelPath);
flatbuffers::FlatBufferBuilder fbb;
ModelT model_obj;
model->GetModel()->UnPackTo(&model_obj);
std::string model_description = model_obj.description;
FinishModelBuffer(fbb, CreateModel(fbb, &model_obj));
ASSERT_NE(model->allocation(), nullptr);
auto model_loader = std::make_unique<BufferModelLoader>(
reinterpret_cast<const char*>(fbb.GetBufferPointer()), fbb.GetSize());
ASSERT_NE(model_loader, nullptr);
EXPECT_TRUE(model_loader->Init());
EXPECT_EQ(model_loader->GetModel()->GetModel()->description()->string_view(),
model_description);
} |
#include "tensorflow/core/framework/node_properties.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#endif
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
namespace tensorflow {
namespace test {
// Fills `attrs` with one AllocatorAttributes entry per kernel output (marked
// on-host when the output's memory type is HOST_MEMORY) and points
// params->output_attr_array at the resulting storage. `attrs` must outlive
// any use of params->output_attr_array.
void SetOutputAttrs(OpKernelContext::Params* params,
                    std::vector<AllocatorAttributes>* attrs) {
  attrs->clear();
  const int num_outputs = params->op_kernel->num_outputs();
  attrs->reserve(num_outputs);
  for (int i = 0; i < num_outputs; ++i) {
    AllocatorAttributes attr;
    attr.set_on_host(params->op_kernel->output_memory_types()[i] ==
                     HOST_MEMORY);
    attrs->push_back(attr);
  }
  params->output_attr_array = attrs->data();
}
}
// Sets up a single-CPU test environment: one CPU device, a one-thread pool,
// an empty function library, and a process FLR wired to the device manager.
OpsTestBase::OpsTestBase() : device_type_(DEVICE_CPU) {
  auto device = DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
  CHECK(device) << "Could not create CPU device";
  thread_pool_ = std::make_unique<thread::ThreadPool>(
      Env::Default(), "default", 1);
  // Keep a raw pointer for direct use; ownership moves to the device manager.
  device_ = device.get();
  device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device));
  allocator_ = device_->GetAllocator(AllocatorAttributes());
  flib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
                                                          FunctionDefLibrary());
  pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
      device_mgr_.get(), Env::Default(), nullptr,
      TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions());
}
// Frees tensors this class allocated (inputs and host-copied outputs) and
// tears down the kernel context before its backing params.
OpsTestBase::~OpsTestBase() {
  for (auto& temp : tensors_) {
    delete temp;
  }
  for (auto& temp : managed_outputs_) {
    delete temp;
  }
  tensors_.clear();
  managed_outputs_.clear();
  // context_ borrows from params_, so it must be destroyed first.
  context_.reset(nullptr);
  params_.reset(nullptr);
}
// Replaces the device used by subsequently-created kernels and contexts.
// Rebuilds the device manager and function library runtime around the new
// device; with GPU support compiled in and a GPU device requested, switches
// to a CUDA-managed allocator so tests can read outputs from the host.
void OpsTestBase::SetDevice(const DeviceType& device_type,
                            std::unique_ptr<Device> device) {
  // Validate the incoming device before taking a raw pointer to it. (The
  // previous code checked the member `device_`, which is always non-null
  // after construction, so a null argument was never caught here.)
  CHECK(device) << "No device provided";
  device_ = device.get();
  device_type_ = device_type;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
  if (device_type == DEVICE_GPU) {
    // Managed memory is host-accessible, letting tests inspect GPU outputs.
    managed_allocator_.reset(new GpuManagedAllocator());
    allocator_ = managed_allocator_.get();
  } else {
    managed_allocator_.reset();
    allocator_ = device_->GetAllocator(AllocatorAttributes());
  }
#else
  CHECK_NE(device_type, DEVICE_GPU)
      << "Requesting GPU on binary compiled without GOOGLE_CUDA or "
         "TENSORFLOW_USE_ROCM.";
  allocator_ = device_->GetAllocator(AllocatorAttributes());
#endif
  device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device));
  pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
      device_mgr_.get(), Env::Default(), nullptr,
      TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
      thread_pool_.get());
}
// Replaces the NodeDef that InitOp() will use with a copy of `node_def`.
void OpsTestBase::set_node_def(const NodeDef& node_def) {
  // Protobuf assignment performs a deep copy, equivalent to CopyFrom().
  node_def_ = node_def;
}
// Mutable access to the NodeDef consumed by InitOp().
NodeDef* OpsTestBase::node_def() { return &node_def_; }
// Creates the kernel from node_def_ using the current graph-def version.
Status OpsTestBase::InitOp() {
  return InitOpWithGraphVersion(TF_GRAPH_DEF_VERSION);
}
// Builds the OpKernel described by node_def_ against the registered op
// definitions, taking ownership of it and caching its input type vector.
Status OpsTestBase::InitOpWithGraphVersion(int graph_def_version) {
  std::shared_ptr<const NodeProperties> props;
  TF_RETURN_IF_ERROR(NodeProperties::CreateFromNodeDef(
      node_def_, OpRegistry::Global(), &props));
  OpKernel* kernel;
  TF_RETURN_IF_ERROR(CreateOpKernel(
      device_type_, device_, allocator(), nullptr,
      device_->resource_manager(), props, graph_def_version, &kernel));
  kernel_.reset(kernel);
  input_types_ = kernel_->input_types();
  return absl::OkStatus();
}
// Returns a lazily-created, process-wide runner that invokes the given
// closure inline on the calling thread. Intentionally heap-allocated and
// never freed so it remains valid during static destruction.
static std::function<void(std::function<void()>)>* GetDefaultRunner() {
  static auto* const runner = new std::function<void(std::function<void()>)>(
      [](const std::function<void()>& fn) { fn(); });
  return runner;
}
// Builds a fresh OpKernelContext::Params/OpKernelContext pair for the current
// kernel and inputs. Any previously host-copied outputs are invalidated.
// Note: params_ must stay alive for as long as context_ does.
void OpsTestBase::CreateContext() {
  // Destroy the old context before mutating the params it points at.
  context_.reset(nullptr);
  for (auto& temp : managed_outputs_) {
    delete temp;
  }
  managed_outputs_.clear();
  managed_outputs_.resize(0);
  params_.reset(new OpKernelContext::Params);
  params_->device = device_;
  params_->frame_iter = FrameAndIter(0, 0);
  params_->inputs = inputs_;
  params_->op_kernel = kernel_.get();
  // Step container with a no-op cleanup; resources persist across steps.
  step_container_.reset(new ScopedStepContainer(0, [](const string&) {}));
  params_->step_container = step_container_.get();
  test::SetOutputAttrs(params_.get(), &out_alloc_attrs_);
  params_->slice_reader_cache = &slice_reader_cache_wrapper_;
  params_->cancellation_manager = &default_cancellation_manager_;
  params_->resource_manager = device_->resource_manager();
  params_->function_library = pflr_->GetFLR(device_->name());
  params_->runner = GetDefaultRunner();
  params_->session_metadata = &session_metadata();
  context_.reset(new OpKernelContext(params_.get()));
}
// Builds a fresh context and runs the kernel synchronously on the test
// device, returning the status the kernel recorded.
Status OpsTestBase::RunOpKernel() {
  CreateContext();
  device_->Compute(kernel_.get(), context_.get());
  return context_->status();
}
// Returns input `input_index` of the current context. Ref-typed inputs are
// rejected; use mutable_input() for those.
const Tensor& OpsTestBase::GetInput(int input_index) const {
  CHECK_LT(input_index, context_->num_inputs());
  CHECK(!IsRefType(context_->input_dtype(input_index)));
  return context_->input(input_index);
}
// Returns the raw TensorValue (mutex + tensor) recorded for input
// `input_index`, suitable for ref-typed inputs.
TensorValue OpsTestBase::mutable_input(int input_index) {
  CHECK_LT(input_index, inputs_.size());
  return inputs_[input_index];
}
// Returns output `output_index` of the last RunOpKernel() call. On GPU the
// device tensor is copied once into a host-accessible tensor that this class
// owns and caches in managed_outputs_ until the next CreateContext().
Tensor* OpsTestBase::GetOutput(int output_index) {
  CHECK_LT(output_index, context_->num_outputs());
  Tensor* output = context_->mutable_output(output_index);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
  if (device_type_ == DEVICE_GPU) {
    managed_outputs_.resize(context_->num_outputs());
    // Copy device memory to host only on first access to this output.
    if (!managed_outputs_[output_index]) {
      Tensor* managed_output =
          new Tensor(allocator(), output->dtype(), output->shape());
      auto src = output->tensor_data();
      auto dst = managed_output->tensor_data();
      context_->eigen_gpu_device().memcpyDeviceToHost(
          const_cast<char*>(dst.data()), src.data(), src.size());
      // Block until the async copy completes before handing out the pointer.
      context_->eigen_gpu_device().synchronize();
      managed_outputs_[output_index] = managed_output;
    }
    output = managed_outputs_[output_index];
  }
#endif
  return output;
}
// Allocator used for test-created tensors (managed allocator on GPU).
Allocator* OpsTestBase::allocator() { return allocator_; }
// The kernel created by InitOp(); owned by this class.
OpKernel* OpsTestBase::op_kernel() { return kernel_.get(); }
// Output dtypes of the kernel created by InitOp().
const DataTypeVector& OpsTestBase::output_types() const {
  return kernel_->output_types();
}
// Allocates and registers the next kernel input tensor of the given dtype and
// shape, verifying it against the kernel's declared input types. Ref-typed
// inputs are guarded by lock_for_refs_. The returned tensor is owned by this
// class; callers fill in its values.
Tensor* OpsTestBase::AddInput(DataType dtype, const TensorShape& shape) {
  CHECK_GT(input_types_.size(), inputs_.size())
      << "Adding more inputs than types; perhaps you need to call MakeOp";
  bool is_ref = IsRefType(input_types_[inputs_.size()]);
  Tensor* input = new Tensor(allocator(), dtype, shape);
  tensors_.push_back(input);
  if (is_ref) {
    // For a ref input the declared type is the ref of `dtype`.
    CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]), dtype);
    inputs_.push_back({&lock_for_refs_, input});
  } else {
    CHECK_EQ(input_types_[inputs_.size()], dtype);
    inputs_.push_back({nullptr, input});
  }
  return input;
}
// Appends a scalar DT_RESOURCE input holding a handle that names a resource
// of type `type_index` in `container_name`/`name` on the test device. The
// resource itself must be registered with the device's resource manager
// separately.
void OpsTestBase::AddResourceInputInternal(const std::string& container_name,
                                           const std::string& name,
                                           const TypeIndex& type_index) {
  ResourceHandle handle;
  handle.set_device(device_->name());
  handle.set_container(container_name);
  handle.set_name(name);
  // hash_code/type name let the runtime type-check dereferences.
  handle.set_hash_code(type_index.hash_code());
  handle.set_maybe_type_name(type_index.name());
  Tensor* input = new Tensor(allocator(), DT_RESOURCE, TensorShape({}));
  input->scalar<ResourceHandle>()() = handle;
  tensors_.push_back(input);
  inputs_.push_back({nullptr, input});
}
} | #include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
// Running a trivial kernel should leave the step container populated.
TEST_F(OpsTestBase, ScopedStepContainer) {
  TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
                   .Input(FakeInput(DT_STRING))
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddInputFromArray<tstring>(TensorShape({}), {""});
  TF_EXPECT_OK(RunOpKernel());
  EXPECT_TRUE(step_container_ != nullptr);
}
// A DT_RESOURCE input should pass through Identity unchanged.
TEST_F(OpsTestBase, ResourceVariableInput) {
  TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
                   .Input(FakeInput(DT_RESOURCE))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  // NOTE(review): presumably AddResourceInput takes ownership of `var` via
  // the resource manager — confirm, otherwise this leaks.
  Var* var = new Var(DT_STRING);
  AddResourceInput("" , "Test" , var);
  TF_ASSERT_OK(RunOpKernel());
  Tensor* output = GetOutput(0);
  EXPECT_EQ(output->dtype(), DT_RESOURCE);
}
} | void OpsTestBase::set_node_def(const NodeDef& node_def) {
node_def_.CopyFrom(node_def);
} | #include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
TEST_F(OpsTestBase, ScopedStepContainer) {
TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<tstring>(TensorShape({}), {""});
TF_EXPECT_OK(RunOpKernel());
EXPECT_TRUE(step_container_ != nullptr);
}
TEST_F(OpsTestBase, ResourceVariableInput) {
TF_EXPECT_OK(NodeDefBuilder("identity", "Identity")
.Input(FakeInput(DT_RESOURCE))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
Var* var = new Var(DT_STRING);
AddResourceInput("" , "Test" , var);
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
EXPECT_EQ(output->dtype(), DT_RESOURCE);
} |
#include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <optional>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
namespace mlir::quant::stablehlo {
namespace {
// Converts an optional string_view into an optional owning std::string,
// preserving nullopt.
std::optional<std::string> OptionalStringViewToOptionalString(
    std::optional<absl::string_view> view) {
  if (!view.has_value()) return std::nullopt;
  return std::string(*view);
}
// True iff `pass` is the "stablehlo-quantize-composite-functions" pass and
// `op` is a non-null ModuleOp. Both arguments may be null.
bool IsQuantizeCompositeFunctionPass(absl::Nullable<Pass*> pass,
                                     absl::Nullable<Operation*> op) {
  return pass != nullptr &&
         pass->getArgument() == "stablehlo-quantize-composite-functions" &&
         isa_and_nonnull<ModuleOp>(op);
}
// A report is written only when a destination path was configured AND the
// pass/op pair is the quantize-composite-functions pass on a ModuleOp.
bool ShouldSaveReport(absl::Nullable<Pass*> pass, absl::Nullable<Operation*> op,
                      const std::optional<std::string>& file_path) {
  if (!file_path.has_value()) return false;
  return IsQuantizeCompositeFunctionPass(pass, op);
}
// Persists `report` to `file_path`. Failures are logged but deliberately not
// propagated — saving the report is best-effort.
void SaveReport(const QuantizationReport& report,
                const absl::string_view file_path) {
  if (const absl::Status save_status = report.Save(file_path);
      save_status.ok()) {
    LOG(INFO) << "Successfully saved quantization report to: " << file_path;
  } else {
    LOG(ERROR) << "Failed to save quantization report to: " << file_path
               << " with status: " << save_status;
  }
}
}
// Stores an owning copy of the optional report destination path; nullopt
// disables saving (the report is still printed).
SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
    std::optional<absl::string_view> file_path)
    : file_path_(OptionalStringViewToOptionalString(file_path)) {}
// Runs after every pass. When the pass that just finished is
// "stablehlo-quantize-composite-functions" on a ModuleOp, prints the
// quantization report and, if a destination path was configured, saves it.
void SaveQuantizationReportInstrumentation::runAfterPass(Pass* pass,
                                                         Operation* op) {
  if (!IsQuantizeCompositeFunctionPass(pass, op)) return;
  const QuantizationReport report(cast<ModuleOp>(op));
  report.Print();
  if (ShouldSaveReport(pass, op, file_path_)) {
    SaveReport(report, *file_path_);
  }
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using SaveQuantizationReportInstrumentationTest = QuantizationTestBase;
TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
}
}
} | SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path)
: file_path_(OptionalStringViewToOptionalString(file_path)) {} | TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
} |
#include "tsl/lib/io/table.h"
#include "tsl/lib/io/block.h"
#include "tsl/lib/io/cache.h"
#include "tsl/lib/io/format.h"
#include "tsl/lib/io/table_options.h"
#include "tsl/lib/io/two_level_iterator.h"
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
namespace tsl {
namespace table {
// Internal state owned by a Table. The index block is owned here (freed in
// the destructor); `file` is owned by the caller of Table::Open and must
// outlive the table.
struct Table::Rep {
  ~Rep() { delete index_block; }
  Options options;
  absl::Status status;
  RandomAccessFile* file;
  uint64 cache_id;  // distinguishes this table's blocks in a shared cache
  BlockHandle metaindex_handle;  // from the footer; unused beyond offset
  Block* index_block;
};
// Opens an sstable stored in `file` (of `size` bytes): reads and decodes the
// fixed-size footer at the end of the file, eagerly loads the index block,
// and on success returns a new Table via `*table` (caller owns it). On any
// failure `*table` stays null and the error is returned. The caller retains
// ownership of `file`, which must outlive the returned table.
absl::Status Table::Open(const Options& options, RandomAccessFile* file,
                         uint64 size, Table** table) {
  *table = nullptr;
  if (size < Footer::kEncodedLength) {
    return errors::DataLoss("file is too short to be an sstable");
  }
  char footer_space[Footer::kEncodedLength];
  StringPiece footer_input;
  absl::Status s =
      file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
                 &footer_input, footer_space);
  if (!s.ok()) return s;
  Footer footer;
  s = footer.DecodeFrom(&footer_input);
  if (!s.ok()) return s;
  // Only the index block is read up front; data blocks are loaded lazily by
  // BlockReader. (The old code re-tested s.ok() after each early return and
  // carried a dead `delete index_block` failure branch — the block could
  // never have been allocated on that path.)
  BlockContents contents;
  s = ReadBlock(file, footer.index_handle(), &contents);
  if (!s.ok()) return s;
  Rep* rep = new Table::Rep;
  rep->options = options;
  rep->file = file;
  rep->metaindex_handle = footer.metaindex_handle();
  rep->index_block = new Block(contents);
  // A per-table cache id keeps this table's blocks distinct in a shared cache.
  rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
  *table = new Table(rep);
  return s;
}
// Rep's destructor frees the index block; `file` is not owned.
Table::~Table() { delete rep_; }
// Iterator cleanup for uncached blocks: deletes the block stored in `arg`.
static void DeleteBlock(void* arg, void* ignored) {
  // static_cast is the correct (and cheaper to audit) cast back from void*.
  delete static_cast<Block*>(arg);
}
// Cache deleter: invoked by the block cache when an entry is evicted.
static void DeleteCachedBlock(const absl::string_view&, void* value) {
  // static_cast is the correct cast from the cache's void* value slot.
  delete static_cast<Block*>(value);
}
// Iterator cleanup for cached blocks: releases the cache handle; the cache
// itself owns and eventually deletes the block via DeleteCachedBlock.
static void ReleaseBlock(void* arg, void* h) {
  // static_cast is the correct cast back from the void* cleanup arguments.
  Cache* cache = static_cast<Cache*>(arg);
  Cache::Handle* handle = static_cast<Cache::Handle*>(h);
  cache->Release(handle);
}
// Converts an index-block entry (`index_value`, an encoded BlockHandle) into
// an iterator over the referenced data block. With a block cache configured,
// blocks are looked up by (cache_id, offset) and inserted on miss; iterator
// cleanup either releases the cache handle or deletes the uncached block.
// On any error an error iterator is returned instead.
Iterator* Table::BlockReader(void* arg, const StringPiece& index_value) {
  Table* table = reinterpret_cast<Table*>(arg);
  Cache* block_cache = table->rep_->options.block_cache;
  Block* block = nullptr;
  Cache::Handle* cache_handle = nullptr;
  BlockHandle handle;
  StringPiece input = index_value;
  absl::Status s = handle.DecodeFrom(&input);
  if (s.ok()) {
    BlockContents contents;
    if (block_cache != nullptr) {
      // Cache key: 8-byte table cache id followed by 8-byte block offset.
      char cache_key_buffer[16];
      core::EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
      core::EncodeFixed64(cache_key_buffer + 8, handle.offset());
      absl::string_view key(cache_key_buffer, sizeof(cache_key_buffer));
      cache_handle = block_cache->Lookup(key);
      if (cache_handle != nullptr) {
        block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
      } else {
        s = ReadBlock(table->rep_->file, handle, &contents);
        if (s.ok()) {
          block = new Block(contents);
          // The cache takes ownership; DeleteCachedBlock runs on eviction.
          cache_handle = block_cache->Insert(key, block, block->size(),
                                             &DeleteCachedBlock);
        }
      }
    } else {
      s = ReadBlock(table->rep_->file, handle, &contents);
      if (s.ok()) {
        block = new Block(contents);
      }
    }
  }
  Iterator* iter;
  if (block != nullptr) {
    iter = block->NewIterator();
    if (cache_handle == nullptr) {
      // Uncached: the iterator owns the block outright.
      iter->RegisterCleanup(&DeleteBlock, block, nullptr);
    } else {
      // Cached: the iterator pins the cache entry until it is destroyed.
      iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
    }
  } else {
    iter = NewErrorIterator(s);
  }
  return iter;
}
// Two-level iteration: the outer level walks the index block; the inner
// level lazily opens each referenced data block via BlockReader.
Iterator* Table::NewIterator() const {
  return NewTwoLevelIterator(rep_->index_block->NewIterator(),
                             &Table::BlockReader, const_cast<Table*>(this));
}
// Point lookup for `k`: seeks the index block to find the candidate data
// block, seeks within that block, and — if any entry at or after `k` exists —
// invokes `saver` with its key/value. Returns the first error encountered.
absl::Status Table::InternalGet(const StringPiece& k, void* arg,
                                void (*saver)(void*, const StringPiece&,
                                              const StringPiece&)) {
  absl::Status s;
  Iterator* iiter = rep_->index_block->NewIterator();
  iiter->Seek(k);
  if (iiter->Valid()) {
    // iiter->value() encodes the handle of the data block that may hold k.
    Iterator* block_iter = BlockReader(this, iiter->value());
    block_iter->Seek(k);
    if (block_iter->Valid()) {
      (*saver)(arg, block_iter->key(), block_iter->value());
    }
    s = block_iter->status();
    delete block_iter;
  }
  if (s.ok()) {
    s = iiter->status();
  }
  delete iiter;
  return s;
}
// Returns the approximate file offset of the data block that would contain
// `key`.  Falls back to the metaindex offset (approximately the end of the
// data-block region) when the key is past the last entry or the matching
// index entry cannot be decoded.
uint64 Table::ApproximateOffsetOf(const StringPiece& key) const {
  uint64 result = rep_->metaindex_handle.offset();
  Iterator* index_iter = rep_->index_block->NewIterator();
  index_iter->Seek(key);
  if (index_iter->Valid()) {
    StringPiece encoded = index_iter->value();
    BlockHandle block_handle;
    if (block_handle.DecodeFrom(&encoded).ok()) {
      result = block_handle.offset();
    }
  }
  delete index_iter;
  return result;
}
}
} | #include "tsl/lib/io/table.h"
#include <algorithm>
#include <map>
#include <string>
#include <vector>
#include "absl/strings/escaping.h"
#include "tsl/lib/io/block.h"
#include "tsl/lib/io/block_builder.h"
#include "tsl/lib/io/format.h"
#include "tsl/lib/io/iterator.h"
#include "tsl/lib/io/table_builder.h"
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/snappy.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace table {
namespace {
typedef std::pair<StringPiece, StringPiece> StringPiecePair;
}
namespace test {
// Fills *dst with `len` uniformly random printable ASCII characters
// (codes 32..126) and returns a view of the result.
static StringPiece RandomString(random::SimplePhilox* rnd, int len,
                                string* dst) {
  dst->resize(len);
  for (int pos = 0; pos < len; ++pos) {
    (*dst)[pos] = static_cast<char>(' ' + rnd->Uniform(95));
  }
  return StringPiece(*dst);
}
// Produces a random key of length `len` drawn from a small alphabet that
// includes boundary bytes (\0, \xff) to exercise comparator edge cases.
static string RandomKey(random::SimplePhilox* rnd, int len) {
  static const char kTestChars[] = {'\0', '\1', 'a',    'b',    'c',
                                    'd',  'e',  '\xfd', '\xfe', '\xff'};
  string key;
  key.reserve(len);
  for (int pos = 0; pos < len; ++pos) {
    key += kTestChars[rnd->Uniform(sizeof(kTestChars))];
  }
  return key;
}
// Builds a string of length `len` that compresses to roughly
// `compressed_fraction * len` bytes, by repeating one short random chunk.
static StringPiece CompressibleString(random::SimplePhilox* rnd,
                                      double compressed_fraction, size_t len,
                                      string* dst) {
  int chunk_len = static_cast<int>(len * compressed_fraction);
  if (chunk_len < 1) chunk_len = 1;
  string chunk;
  RandomString(rnd, chunk_len, &chunk);
  // Repeat the chunk until the target length is reached, then trim.
  dst->clear();
  while (dst->size() < len) {
    dst->append(chunk);
  }
  dst->resize(len);
  return StringPiece(*dst);
}
}
// Replaces *key with the smallest string that sorts strictly after it
// (appends a single NUL byte).
static void Increment(string* key) { key->append(1, '\0'); }
namespace {
// Comparator used by KVMap: byte-wise lexicographic ordering implemented via
// StringPiece::compare, matching the table's key ordering.
struct STLLessThan {
  STLLessThan() {}
  bool operator()(const string& lhs, const string& rhs) const {
    return StringPiece(lhs).compare(StringPiece(rhs)) < 0;
  }
};
}
// WritableFile that accumulates everything written into an in-memory string,
// so tests can build a table file without touching the filesystem.
class StringSink : public WritableFile {
 public:
  ~StringSink() override {}
  // All bytes appended so far.
  const string& contents() const { return contents_; }
  absl::Status Close() override { return absl::OkStatus(); }
  absl::Status Flush() override { return absl::OkStatus(); }
  absl::Status Name(StringPiece* result) const override {
    return errors::Unimplemented("StringSink does not support Name()");
  }
  absl::Status Sync() override { return absl::OkStatus(); }
  // Reports the current write position (== bytes written).
  absl::Status Tell(int64_t* pos) override {
    *pos = contents_.size();
    return absl::OkStatus();
  }
  absl::Status Append(StringPiece data) override {
    contents_.append(data.data(), data.size());
    return absl::OkStatus();
  }

 private:
  string contents_;
};
// RandomAccessFile backed by an in-memory string; also counts bytes read so
// tests can assert on read amplification (see BytesRead()).
class StringSource : public RandomAccessFile {
 public:
  explicit StringSource(const StringPiece& contents)
      : contents_(contents.data(), contents.size()), bytes_read_(0) {}
  ~StringSource() override {}
  uint64 Size() const { return contents_.size(); }
  absl::Status Name(StringPiece* result) const override {
    return errors::Unimplemented("StringSource does not support Name()");
  }
  // Copies up to n bytes at `offset` into `scratch`; short reads past EOF are
  // clamped rather than treated as errors.
  absl::Status Read(uint64 offset, size_t n, StringPiece* result,
                    char* scratch) const override {
    if (offset > contents_.size()) {
      return errors::InvalidArgument("invalid Read offset");
    }
    if (offset + n > contents_.size()) {
      n = contents_.size() - offset;
    }
    memcpy(scratch, &contents_[offset], n);
    *result = StringPiece(scratch, n);
    bytes_read_ += n;
    return absl::OkStatus();
  }
  uint64 BytesRead() const { return bytes_read_; }

 private:
  string contents_;
  // mutable: Read() is const but still accounts for bytes served.
  mutable uint64 bytes_read_;
};
typedef std::map<string, string, STLLessThan> KVMap;
// Base class for test drivers that build a key/value store (a raw block or a
// full table) from recorded pairs, then expose an Iterator over it that the
// harness validates against the in-memory KVMap model.
class Constructor {
 public:
  explicit Constructor() : data_(STLLessThan()) {}
  virtual ~Constructor() {}
  // Records a key/value pair to be written by Finish().
  void Add(const string& key, const StringPiece& value) {
    data_[key] = string(value);
  }
  // Writes all recorded pairs via FinishImpl() and hands the sorted keys and
  // the full model map back to the caller.
  void Finish(const Options& options, std::vector<string>* keys, KVMap* kvmap) {
    *kvmap = data_;
    keys->clear();
    for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
      keys->push_back(it->first);
    }
    data_.clear();
    absl::Status s = FinishImpl(options, *kvmap);
    ASSERT_TRUE(s.ok()) << s.ToString();
  }
  // Builds the concrete data structure (block or table) from `data`.
  virtual absl::Status FinishImpl(const Options& options,
                                  const KVMap& data) = 0;
  virtual Iterator* NewIterator() const = 0;
  virtual const KVMap& data() { return data_; }

 private:
  KVMap data_;
};
// Constructor variant that writes the pairs into a single in-memory Block.
class BlockConstructor : public Constructor {
 public:
  BlockConstructor() : block_(nullptr) {}
  ~BlockConstructor() override { delete block_; }
  absl::Status FinishImpl(const Options& options, const KVMap& data) override {
    delete block_;
    block_ = nullptr;
    BlockBuilder builder(&options);
    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
      builder.Add(it->first, it->second);
    }
    // data_ keeps the serialized bytes alive; the Block only views them
    // (heap_allocated = false).
    data_ = string(builder.Finish());
    BlockContents contents;
    contents.data = data_;
    contents.cacheable = false;
    contents.heap_allocated = false;
    block_ = new Block(contents);
    return absl::OkStatus();
  }
  Iterator* NewIterator() const override { return block_->NewIterator(); }

 private:
  string data_;
  Block* block_;
};
// Constructor variant that writes the pairs through TableBuilder into an
// in-memory sink, then reopens the bytes with Table::Open.
class TableConstructor : public Constructor {
 public:
  TableConstructor() : source_(nullptr), table_(nullptr) {}
  ~TableConstructor() override { Reset(); }
  absl::Status FinishImpl(const Options& options, const KVMap& data) override {
    Reset();
    StringSink sink;
    TableBuilder builder(options, &sink);
    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
      builder.Add(it->first, it->second);
      TF_CHECK_OK(builder.status());
    }
    absl::Status s = builder.Finish();
    TF_CHECK_OK(s) << s.ToString();
    CHECK_EQ(sink.contents().size(), builder.FileSize());
    // NOTE: the table is reopened with default Options, not the ones used for
    // writing — readers only need the footer/index to decode.
    source_ = new StringSource(sink.contents());
    Options table_options;
    return Table::Open(table_options, source_, sink.contents().size(), &table_);
  }
  Iterator* NewIterator() const override { return table_->NewIterator(); }
  uint64 ApproximateOffsetOf(const StringPiece& key) const {
    return table_->ApproximateOffsetOf(key);
  }
  // Total bytes served by the backing StringSource (read amplification).
  uint64 BytesRead() const { return source_->BytesRead(); }

 private:
  void Reset() {
    delete table_;
    delete source_;
    table_ = nullptr;
    source_ = nullptr;
  }
  StringSource* source_;
  Table* table_;
};
// Which storage implementation a Harness run exercises.
enum TestType { TABLE_TEST, BLOCK_TEST };
struct TestArgs {
  TestType type;
  int restart_interval;  // forwarded to Options::block_restart_interval
};
// Every (implementation, restart interval) combination the harness covers.
static const TestArgs kTestArgList[] = {
    {TABLE_TEST, 16}, {TABLE_TEST, 1}, {TABLE_TEST, 1024},
    {BLOCK_TEST, 16}, {BLOCK_TEST, 1}, {BLOCK_TEST, 1024},
};
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
// Test fixture that cross-checks a Constructor-built store (block or table)
// against an in-memory std::map model, via a forward scan and randomized
// Seek/Next/SeekToFirst operations.
class Harness : public ::testing::Test {
 public:
  Harness() : constructor_(nullptr) {}
  // (Re)configures options and swaps in the constructor for args.type.
  void Init(const TestArgs& args) {
    delete constructor_;
    constructor_ = nullptr;
    options_ = Options();
    options_.block_restart_interval = args.restart_interval;
    options_.block_size = 256;
    switch (args.type) {
      case TABLE_TEST:
        constructor_ = new TableConstructor();
        break;
      case BLOCK_TEST:
        constructor_ = new BlockConstructor();
        break;
    }
  }
  ~Harness() override { delete constructor_; }
  void Add(const string& key, const string& value) {
    constructor_->Add(key, value);
  }
  // Builds the store from the added pairs and runs both validation passes.
  void Test(random::SimplePhilox* rnd, int num_random_access_iters = 200) {
    std::vector<string> keys;
    KVMap data;
    constructor_->Finish(options_, &keys, &data);
    TestForwardScan(keys, data);
    TestRandomAccess(rnd, keys, data, num_random_access_iters);
  }
  // Full in-order scan must yield exactly the model map's entries.
  void TestForwardScan(const std::vector<string>& keys, const KVMap& data) {
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    iter->SeekToFirst();
    for (KVMap::const_iterator model_iter = data.begin();
         model_iter != data.end(); ++model_iter) {
      ASSERT_EQ(ToStringPiecePair(data, model_iter), ToStringPiecePair(iter));
      iter->Next();
    }
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  }
  // Random walk: each step picks Next / SeekToFirst / Seek(random key) and
  // keeps a model iterator in lockstep with the store's iterator.
  void TestRandomAccess(random::SimplePhilox* rnd,
                        const std::vector<string>& keys, const KVMap& data,
                        int num_random_access_iters) {
    static const bool kVerbose = false;
    Iterator* iter = constructor_->NewIterator();
    ASSERT_TRUE(!iter->Valid());
    KVMap::const_iterator model_iter = data.begin();
    if (kVerbose) fprintf(stderr, "---\n");
    for (int i = 0; i < num_random_access_iters; i++) {
      const int toss = rnd->Uniform(3);
      switch (toss) {
        case 0: {
          // Next (only when valid, to avoid advancing past the end).
          if (iter->Valid()) {
            if (kVerbose) fprintf(stderr, "Next\n");
            iter->Next();
            ++model_iter;
            ASSERT_EQ(ToStringPiecePair(data, model_iter),
                      ToStringPiecePair(iter));
          }
          break;
        }
        case 1: {
          if (kVerbose) fprintf(stderr, "SeekToFirst\n");
          iter->SeekToFirst();
          model_iter = data.begin();
          ASSERT_EQ(ToStringPiecePair(data, model_iter),
                    ToStringPiecePair(iter));
          break;
        }
        case 2: {
          // Seek to a key near (possibly equal to) an existing key; the model
          // equivalent of Seek is lower_bound.
          string key = PickRandomKey(rnd, keys);
          model_iter = data.lower_bound(key);
          if (kVerbose)
            fprintf(stderr, "Seek '%s'\n", absl::CEscape(key).c_str());
          iter->Seek(StringPiece(key));
          ASSERT_EQ(ToStringPiecePair(data, model_iter),
                    ToStringPiecePair(iter));
          break;
        }
      }
    }
    delete iter;
  }
  // The three overloads below normalize a position (model iterator, reverse
  // model iterator, or store iterator) to a comparable key/value pair, with
  // ("END", "") standing in for the past-the-end position.
  StringPiecePair ToStringPiecePair(const KVMap& data,
                                    const KVMap::const_iterator& it) {
    if (it == data.end()) {
      return StringPiecePair("END", "");
    } else {
      return StringPiecePair(it->first, it->second);
    }
  }
  StringPiecePair ToStringPiecePair(const KVMap& data,
                                    const KVMap::const_reverse_iterator& it) {
    if (it == data.rend()) {
      return StringPiecePair("END", "");
    } else {
      return StringPiecePair(it->first, it->second);
    }
  }
  StringPiecePair ToStringPiecePair(const Iterator* it) {
    if (!it->Valid()) {
      return StringPiecePair("END", "");
    } else {
      return StringPiecePair(it->key(), it->value());
    }
  }
  // Returns an existing key, a slightly smaller key, or a slightly larger key
  // (equal probability), so Seek hits exact, before-key and after-key cases.
  string PickRandomKey(random::SimplePhilox* rnd,
                       const std::vector<string>& keys) {
    if (keys.empty()) {
      return "foo";
    } else {
      const int index = rnd->Uniform(keys.size());
      string result = keys[index];
      switch (rnd->Uniform(3)) {
        case 0:
          break;
        case 1: {
          // Attempt to return something smaller than an existing key.
          if (!result.empty() && result[result.size() - 1] > '\0') {
            result[result.size() - 1]--;
          }
          break;
        }
        case 2: {
          // Return something larger than an existing key.
          Increment(&result);
          break;
        }
      }
      return result;
    }
  }

 private:
  Options options_;
  Constructor* constructor_;
};
// Empty store: scan and random access over zero entries.
TEST_F(Harness, Empty) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
    random::SimplePhilox rnd(&philox);
    Test(&rnd);
  }
}
// A block whose contents are a single zeroed uint32 (zero restart points)
// must behave as empty rather than crash.
TEST_F(Harness, ZeroRestartPointsInBlock) {
  char data[sizeof(uint32)];
  memset(data, 0, sizeof(data));
  BlockContents contents;
  contents.data = StringPiece(data, sizeof(data));
  contents.cacheable = false;
  contents.heap_allocated = false;
  Block block(contents);
  Iterator* iter = block.NewIterator();
  iter->SeekToFirst();
  ASSERT_TRUE(!iter->Valid());
  iter->Seek("foo");
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
// Single entry whose key is the empty string.
TEST_F(Harness, SimpleEmptyKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
    random::SimplePhilox rnd(&philox);
    Add("", "v");
    Test(&rnd);
  }
}
TEST_F(Harness, SimpleSingle) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 2, 17);
    random::SimplePhilox rnd(&philox);
    Add("abc", "v");
    Test(&rnd);
  }
}
TEST_F(Harness, SimpleMulti) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
    random::SimplePhilox rnd(&philox);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}
// 10MB values force multi-block layout; fewer random-access iterations keep
// the test fast.
TEST_F(Harness, SimpleMultiBigValues) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
    random::SimplePhilox rnd(&philox);
    Add("ainitial", "tiny");
    Add("anext", string(10000000, 'a'));
    Add("anext2", string(10000000, 'b'));
    Add("azz", "tiny");
    Test(&rnd, 100 );
  }
}
// Maximal byte values in the key.
TEST_F(Harness, SimpleSpecialKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 4, 17);
    random::SimplePhilox rnd(&philox);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
// Randomized keys/values at growing entry counts.
TEST_F(Harness, Randomized) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 5, 17);
    random::SimplePhilox rnd(&philox);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
                int(kNumTestArgs), num_entries);
      }
      for (int e = 0; e < num_entries; e++) {
        string v;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            string(test::RandomString(&rnd, rnd.Skewed(5), &v)));
      }
      Test(&rnd);
    }
  }
}
// Returns true iff low <= val <= high; on failure, logs the offending value
// and bounds to stderr so the ASSERT message is diagnosable.
static bool Between(uint64 val, uint64 low, uint64 high) {
  if (val < low || val > high) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            static_cast<unsigned long long>(val),
            static_cast<unsigned long long>(low),
            static_cast<unsigned long long>(high));
    return false;
  }
  return true;
}
// Vestigial tag type; the TEST() macros below define their own fixtures.
class TableTest {};
// With compression off and 1KB blocks, ApproximateOffsetOf must land near the
// known cumulative sizes of the values written before each key.
TEST(TableTest, ApproximateOffsetOfPlain) {
  TableConstructor c;
  c.Add("k01", "hello");
  c.Add("k02", "hello2");
  c.Add("k03", string(10000, 'x'));
  c.Add("k04", string(200000, 'x'));
  c.Add("k05", string(300000, 'x'));
  c.Add("k06", "hello3");
  c.Add("k07", string(100000, 'x'));
  std::vector<string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kNoCompression;
  c.Finish(options, &keys, &kvmap);
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 10, 500));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
}
static bool SnappyCompressionSupported() {
string out;
StringPiece in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
return port::Snappy_Compress(in.data(), in.size(), &out);
}
// With Snappy enabled, offsets reflect compressed (not logical) sizes: the
// 10000-byte values are ~25% compressible, so bounds are a few KB.
TEST(TableTest, ApproximateOffsetOfCompressed) {
  if (!SnappyCompressionSupported()) {
    fprintf(stderr, "skipping compression tests\n");
    return;
  }
  random::PhiloxRandom philox(301, 17);
  random::SimplePhilox rnd(&philox);
  TableConstructor c;
  string tmp;
  c.Add("k01", "hello");
  c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  c.Add("k03", "hello3");
  c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp));
  std::vector<string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kSnappyCompression;
  c.Finish(options, &keys, &kvmap);
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 10, 100));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 4000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 4000));
  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 7000));
}
// Seeking the first (tiny) key must not pull in the ~1MB value stored under a
// later key — i.e. the two-level iterator only reads the needed block.
TEST(TableTest, SeekToFirstKeyDoesNotReadTooMuch) {
  random::PhiloxRandom philox(301, 17);
  random::SimplePhilox rnd(&philox);
  string tmp;
  TableConstructor c;
  c.Add("k01", "firstvalue");
  c.Add("k03", test::CompressibleString(&rnd, 0.25, 1000000, &tmp));
  c.Add("k04", "abc");
  std::vector<string> keys;
  KVMap kvmap;
  Options options;
  options.block_size = 1024;
  options.compression = kNoCompression;
  c.Finish(options, &keys, &kvmap);
  Iterator* iter = c.NewIterator();
  iter->Seek("k01");
  delete iter;
  // BytesRead() is read amplification from the backing StringSource.
  EXPECT_LT(c.BytesRead(), 200);
}
}
} | absl::Status Table::Open(const Options& options, RandomAccessFile* file,
                         uint64 size, Table** table) {
  // Reads and validates the footer, then loads the index block; on success
  // *table takes ownership of the index block (the caller retains ownership
  // of `file`).  On any failure *table stays nullptr.
  *table = nullptr;
  if (size < Footer::kEncodedLength) {
    return errors::DataLoss("file is too short to be an sstable");
  }
  char footer_space[Footer::kEncodedLength];
  StringPiece footer_input;
  absl::Status s =
      file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
                 &footer_input, footer_space);
  if (!s.ok()) return s;
  Footer footer;
  s = footer.DecodeFrom(&footer_input);
  if (!s.ok()) return s;
  BlockContents contents;
  Block* index_block = nullptr;
  // NOTE(review): s is always ok here (early returns above), so the s.ok()
  // guards and the else-branch delete below are effectively dead code kept
  // for symmetry with the upstream implementation.
  if (s.ok()) {
    s = ReadBlock(file, footer.index_handle(), &contents);
  }
  if (s.ok()) {
    index_block = new Block(contents);
    Rep* rep = new Table::Rep;
    rep->options = options;
    rep->file = file;
    rep->metaindex_handle = footer.metaindex_handle();
    rep->index_block = index_block;
    // cache_id namespaces this table's entries within a shared block cache.
    rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
    *table = new Table(rep);
  } else {
    if (index_block) delete index_block;
  }
  return s;
} | TEST_F(Harness, Empty) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
    random::SimplePhilox rnd(&philox);
    Test(&rnd);
  }
}
// NOTE(review): the tests below duplicate the Harness tests earlier in this
// dump (same fixture, same seeds); see the comments on the first copies.
TEST_F(Harness, ZeroRestartPointsInBlock) {
  char data[sizeof(uint32)];
  memset(data, 0, sizeof(data));
  BlockContents contents;
  contents.data = StringPiece(data, sizeof(data));
  contents.cacheable = false;
  contents.heap_allocated = false;
  Block block(contents);
  Iterator* iter = block.NewIterator();
  iter->SeekToFirst();
  ASSERT_TRUE(!iter->Valid());
  iter->Seek("foo");
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
TEST_F(Harness, SimpleEmptyKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 1, 17);
    random::SimplePhilox rnd(&philox);
    Add("", "v");
    Test(&rnd);
  }
}
TEST_F(Harness, SimpleSingle) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 2, 17);
    random::SimplePhilox rnd(&philox);
    Add("abc", "v");
    Test(&rnd);
  }
}
TEST_F(Harness, SimpleMulti) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
    random::SimplePhilox rnd(&philox);
    Add("abc", "v");
    Add("abcd", "v");
    Add("ac", "v2");
    Test(&rnd);
  }
}
TEST_F(Harness, SimpleMultiBigValues) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 3, 17);
    random::SimplePhilox rnd(&philox);
    Add("ainitial", "tiny");
    Add("anext", string(10000000, 'a'));
    Add("anext2", string(10000000, 'b'));
    Add("azz", "tiny");
    Test(&rnd, 100 );
  }
}
TEST_F(Harness, SimpleSpecialKey) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 4, 17);
    random::SimplePhilox rnd(&philox);
    Add("\xff\xff", "v3");
    Test(&rnd);
  }
}
TEST_F(Harness, Randomized) {
  for (int i = 0; i < kNumTestArgs; i++) {
    Init(kTestArgList[i]);
    random::PhiloxRandom philox(testing::RandomSeed() + 5, 17);
    random::SimplePhilox rnd(&philox);
    for (int num_entries = 0; num_entries < 2000;
         num_entries += (num_entries < 50 ? 1 : 200)) {
      if ((num_entries % 10) == 0) {
        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
                int(kNumTestArgs), num_entries);
      }
      for (int e = 0; e < num_entries; e++) {
        string v;
        Add(test::RandomKey(&rnd, rnd.Skewed(4)),
            string(test::RandomString(&rnd, rnd.Skewed(5), &v)));
      }
      Test(&rnd);
    }
  }
}
#include "xla/pjrt/tf_pjrt_client.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
namespace xla {
// A TfPjRtBuffer registers itself with its owning client on construction so
// the client can later invalidate all live buffers in bulk
// (DestroyWrappedBuffersAndClient).
TfPjRtBuffer::TfPjRtBuffer(TfPjRtClient* client,
                           std::unique_ptr<PjRtBuffer> wrapped)
    : client_(client), wrapped_(std::move(wrapped)) {
  client_->TrackBuffer(this);
}
// Deregisters from the client; UntrackBuffer is a no-op if the wrapped buffer
// was already destroyed.
TfPjRtBuffer::~TfPjRtBuffer() { client_->UntrackBuffer(this); }
// Both wrappers report the wrapping TfPjRtClient, not the wrapped client.
PjRtClient* TfPjRtBuffer::client() const { return client_; }
PjRtClient* TfPjRtExecutable::client() const { return client_; }
// Copies via the wrapped buffer, then re-wraps the result so the copy is also
// tracked by this client.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtBuffer::CopyToDevice(
    PjRtDevice* dst_device) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> result,
                      wrapped_->CopyToDevice(dst_device));
  return std::unique_ptr<PjRtBuffer>(
      std::make_unique<TfPjRtBuffer>(client_, std::move(result)));
}
// Thin wrapper executable; all work is delegated to `wrapped`, with buffer
// arguments/results translated in the Execute* methods below.
TfPjRtExecutable::TfPjRtExecutable(
    TfPjRtClient* client, std::unique_ptr<PjRtLoadedExecutable> wrapped)
    : client_(client), wrapped_(std::move(wrapped)) {}
// Unwraps every TfPjRtBuffer argument to its underlying PjRtBuffer, runs the
// wrapped executable, then re-wraps every output buffer so it is tracked by
// this client.
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
TfPjRtExecutable::Execute(
    absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
    const ExecuteOptions& options,
    std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
  std::vector<std::vector<PjRtBuffer*>> unwrapped_argument_handles;
  unwrapped_argument_handles.reserve(argument_handles.size());
  for (auto& handles : argument_handles) {
    unwrapped_argument_handles.emplace_back();
    auto& unwrapped_handles = unwrapped_argument_handles.back();
    unwrapped_handles.reserve(handles.size());
    for (PjRtBuffer* buffer : handles) {
      // Every argument is assumed to be a TfPjRtBuffer created by this client.
      unwrapped_handles.push_back(
          tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
    }
  }
  TF_ASSIGN_OR_RETURN(auto out, wrapped_->Execute(unwrapped_argument_handles,
                                                  options, returned_futures));
  // Re-wrap outputs in place so callers only ever see tracked buffers.
  for (auto& buffer_list : out) {
    for (std::unique_ptr<PjRtBuffer>& buffer : buffer_list) {
      buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
    }
  }
  return out;
}
// Single-device variant of Execute: same unwrap-delegate-rewrap pattern, for
// one shard on the given device.
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecuteSharded(absl::Span<PjRtBuffer* const> argument_handles,
                                 PjRtDevice* device,
                                 const ExecuteOptions& options,
                                 std::optional<PjRtFuture<>>& returned_future,
                                 bool fill_future) {
  std::vector<PjRtBuffer*> unwrapped_argument_handles;
  unwrapped_argument_handles.reserve(argument_handles.size());
  for (PjRtBuffer* buffer : argument_handles) {
    unwrapped_argument_handles.push_back(
        tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
  }
  TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecuteSharded(
                                    unwrapped_argument_handles, device, options,
                                    returned_future, fill_future));
  for (std::unique_ptr<PjRtBuffer>& buffer : out) {
    buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
  }
  return out;
}
// Portable-executable variant: identical unwrap-delegate-rewrap pattern as
// ExecuteSharded, delegating to wrapped_->ExecutePortable.
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecutePortable(
    absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
    const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
    bool fill_future) {
  std::vector<PjRtBuffer*> unwrapped_argument_handles;
  unwrapped_argument_handles.reserve(argument_handles.size());
  for (PjRtBuffer* buffer : argument_handles) {
    unwrapped_argument_handles.push_back(
        tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
  }
  TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecutePortable(
                                    unwrapped_argument_handles, device, options,
                                    returned_future, fill_future));
  for (std::unique_ptr<PjRtBuffer>& buffer : out) {
    buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
  }
  return out;
}
// Sets up one mutex-protected alive-buffer set per addressable device and a
// device-id -> set-index map, so buffer tracking is sharded per device.
TfPjRtClient::TfPjRtClient(std::unique_ptr<PjRtClient> wrapped)
    : wrapped_(std::move(wrapped)) {
  LOG(INFO) << "TfPjRtClient created.";
  int num_mutexes = wrapped_->addressable_device_count();
  alive_buffers_ = std::vector<DeviceBuffers>(num_mutexes);
  for (int i = 0; i < num_mutexes; ++i) {
    mutex_id_from_device_id_.insert(
        {wrapped_->addressable_devices()[i]->id(), i});
  }
}
TfPjRtClient::~TfPjRtClient() { LOG(INFO) << "TfPjRtClient destroyed."; }
// Wraps a buffer produced by the underlying client into a tracked
// TfPjRtBuffer; propagates the incoming error status unchanged.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtClient::WrapBuffer(
    absl::StatusOr<std::unique_ptr<PjRtBuffer>> to_wrap) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> buffer, std::move(to_wrap));
  return std::unique_ptr<PjRtBuffer>(
      std::make_unique<TfPjRtBuffer>(this, std::move(buffer)));
}
// Same wrapping for compiled executables.
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
TfPjRtClient::WrapExecutable(
    absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> to_wrap) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
                      std::move(to_wrap));
  return std::unique_ptr<PjRtLoadedExecutable>(
      std::make_unique<TfPjRtExecutable>(this, std::move(executable)));
}
// Maps a buffer's device id to the index of its alive-buffer shard; CHECK
// fails if the buffer lives on a device the client does not know about.
static int GetMutexId(
    const TfPjRtBuffer* buffer,
    const absl::flat_hash_map<int, int>& mutex_id_from_device_id) {
  auto iters = mutex_id_from_device_id.find(buffer->wrapped()->device()->id());
  CHECK(iters != mutex_id_from_device_id.end())
      << "Mutex id not found for device id: "
      << buffer->wrapped()->device()->id();
  return iters->second;
}
// Adds the buffer to its device's alive set (called from the TfPjRtBuffer
// constructor).
void TfPjRtClient::TrackBuffer(TfPjRtBuffer* buffer) {
  int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
  {
    absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
    alive_buffers_[mutex_id].alive_buffers.insert(buffer);
  }
}
// Removes the buffer from its device's alive set (called from the destructor).
void TfPjRtClient::UntrackBuffer(const TfPjRtBuffer* buffer) {
  // A null wrapped buffer means it was already destroyed via
  // DestroyWrappedBuffersAndClient; there is nothing to untrack (and no
  // device id to look up).
  if (buffer->wrapped() == nullptr) {
    return;
  }
  int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
  {
    absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
    alive_buffers_[mutex_id].alive_buffers.erase(buffer);
  }
}
// Destroys the wrapped buffer inside every tracked TfPjRtBuffer (the wrappers
// themselves stay alive for their owners), then releases the wrapped client.
void TfPjRtClient::DestroyWrappedBuffersAndClient() {
  int num_mutexes = alive_buffers_.size();
  for (int i = 0; i < num_mutexes; ++i) {
    absl::MutexLock lock(&alive_buffers_[i].mu);
    for (auto* buffer : alive_buffers_[i].alive_buffers) {
      buffer->DestroyWrappedBuffer();
    }
  }
  wrapped_.reset(nullptr);
  LOG(INFO) << "TfPjRtClient::DestroyWrappedBuffersAndClient completed.";
}
// Factory: wraps an existing PjRtClient in the tracking client.
std::unique_ptr<TfPjRtClient> TfPjRtClient::CreateTfPjRtClient(
    std::unique_ptr<PjRtClient> wrapped) {
  return std::make_unique<TfPjRtClient>(std::move(wrapped));
}
} | #include "xla/pjrt/tf_pjrt_client.h"
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/literal_util.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// End-to-end: compiles and runs a small f32[3,2] add through the wrapping
// client with HLO snapshot dumping enabled, then verifies the dumped snapshot
// contains both arguments and the elementwise sum, and finally exercises
// DestroyWrappedBuffersAndClient while wrapper buffers are still alive.
TEST(TfClientTest, ExecuteAndHloSnapshot) {
  constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
  x = f32[3,2] parameter(0)
  y = f32[3,2] parameter(1)
  ROOT add = f32[3,2] add(x, y)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(true));
  client = TfPjRtClient::CreateTfPjRtClient(std::move(client));
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kProgram, {}));
  // Dump snapshots into a temp dir so the test can read them back.
  std::string dir = tsl::testing::TmpDir();
  xla::CompileOptions options;
  auto* debug_opts = options.executable_build_options.mutable_debug_options();
  debug_opts->set_xla_dump_to(dir);
  debug_opts->set_xla_dump_hlo_snapshots(true);
  XlaComputation xla_computation(hlo_module->ToProto());
  TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
                          client->Compile(xla_computation, options));
  std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
  std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
  Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
  TF_ASSERT_OK_AND_ASSIGN(
      auto buffer1,
      client->BufferFromHostBuffer(
          data1.data(), shape.element_type(), shape.dimensions(),
          std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
          client->addressable_devices()[0]));
  TF_ASSERT_OK_AND_ASSIGN(
      auto buffer2,
      client->BufferFromHostBuffer(
          data2.data(), shape.element_type(), shape.dimensions(),
          std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
          client->addressable_devices()[0]));
  auto result = pjrt_executable->Execute(
      {{buffer1.get(), buffer2.get()}},
      {});
  ASSERT_TRUE(result.ok());
  // Exactly one snapshot proto is expected from the single execution.
  tsl::FileSystem* fs;
  ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
  std::vector<std::string> paths;
  ASSERT_TRUE(fs->GetMatchingPaths(dir + "/*.snapshot.*.pb", &paths).ok());
  ASSERT_EQ(paths.size(), 1);
  HloSnapshot snapshot;
  ASSERT_TRUE(
      tsl::ReadBinaryProto(tsl::Env::Default(), paths[0], &snapshot).ok());
  ASSERT_EQ(*Literal::CreateFromProto(snapshot.arguments(0)),
            LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}));
  ASSERT_EQ(
      *Literal::CreateFromProto(snapshot.arguments(1)),
      LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}, {50.0, 60.0}}));
  ASSERT_EQ(
      *Literal::CreateFromProto(snapshot.result()),
      LiteralUtil::CreateR2<float>({{11.0, 22.0}, {33.0, 44.0}, {55.0, 66.0}}));
  auto* tf_pjrt_client =
      tensorflow::down_cast<xla::TfPjRtClient*>(client.get());
  tf_pjrt_client->DestroyWrappedBuffersAndClient();
}
}
} | void TfPjRtClient::DestroyWrappedBuffersAndClient() {
  // NOTE(review): duplicate of the method defined earlier in this dump —
  // destroys every tracked wrapped buffer, then drops the wrapped client.
  int num_mutexes = alive_buffers_.size();
  for (int i = 0; i < num_mutexes; ++i) {
    absl::MutexLock lock(&alive_buffers_[i].mu);
    for (auto* buffer : alive_buffers_[i].alive_buffers) {
      buffer->DestroyWrappedBuffer();
    }
  }
  wrapped_.reset(nullptr);
  LOG(INFO) << "TfPjRtClient::DestroyWrappedBuffersAndClient completed.";
} | #include "xla/pjrt/tf_pjrt_client.h"
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/literal_util.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// NOTE(review): duplicate of the snapshot test earlier in this dump; see the
// comments on the first copy.
TEST(TfClientTest, ExecuteAndHloSnapshot) {
  constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
  x = f32[3,2] parameter(0)
  y = f32[3,2] parameter(1)
  ROOT add = f32[3,2] add(x, y)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(true));
  client = TfPjRtClient::CreateTfPjRtClient(std::move(client));
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kProgram, {}));
  std::string dir = tsl::testing::TmpDir();
  xla::CompileOptions options;
  auto* debug_opts = options.executable_build_options.mutable_debug_options();
  debug_opts->set_xla_dump_to(dir);
  debug_opts->set_xla_dump_hlo_snapshots(true);
  XlaComputation xla_computation(hlo_module->ToProto());
  TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
                          client->Compile(xla_computation, options));
  std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
  std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
  Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
  TF_ASSERT_OK_AND_ASSIGN(
      auto buffer1,
      client->BufferFromHostBuffer(
          data1.data(), shape.element_type(), shape.dimensions(),
          std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
          client->addressable_devices()[0]));
  TF_ASSERT_OK_AND_ASSIGN(
      auto buffer2,
      client->BufferFromHostBuffer(
          data2.data(), shape.element_type(), shape.dimensions(),
          std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
          client->addressable_devices()[0]));
  auto result = pjrt_executable->Execute(
      {{buffer1.get(), buffer2.get()}},
      {});
  ASSERT_TRUE(result.ok());
  tsl::FileSystem* fs;
  ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
  std::vector<std::string> paths;
  ASSERT_TRUE(fs->GetMatchingPaths(dir + "/*.snapshot.*.pb", &paths).ok());
  ASSERT_EQ(paths.size(), 1);
  HloSnapshot snapshot;
  ASSERT_TRUE(
      tsl::ReadBinaryProto(tsl::Env::Default(), paths[0], &snapshot).ok());
  ASSERT_EQ(*Literal::CreateFromProto(snapshot.arguments(0)),
            LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}));
  ASSERT_EQ(
      *Literal::CreateFromProto(snapshot.arguments(1)),
      LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}, {50.0, 60.0}}));
  ASSERT_EQ(
      *Literal::CreateFromProto(snapshot.result()),
      LiteralUtil::CreateR2<float>({{11.0, 22.0}, {33.0, 44.0}, {55.0, 66.0}}));
  auto* tf_pjrt_client =
      tensorflow::down_cast<xla::TfPjRtClient*>(client.get());
  tf_pjrt_client->DestroyWrappedBuffersAndClient();
}
#include "xla/service/hlo_parser.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_lexer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/name_uniquer.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/gtl/map_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
// Builds an HloSchedule that orders the instructions of every non-fusion
// computation exactly as they are iterated in the module. Fusion
// computations are skipped (they are not scheduled independently).
HloSchedule ScheduleFromInstructionOrder(HloModule* module) {
  HloSchedule schedule(module);
  for (HloComputation* comp : module->computations()) {
    if (comp->IsFusionComputation()) {
      continue;
    }
    auto& sequence = schedule.GetOrCreateSequence(comp);
    for (HloInstruction* instr : comp->instructions()) {
      sequence.push_back(instr);
    }
  }
  return schedule;
}
// Returns true iff the result shape of an instruction with opcode `code`
// can be inferred from its operands alone, so the HLO text is allowed to
// omit an explicit shape for it. Opcodes in the `false` group carry shape
// information (attributes, explicit layouts, etc.) that must be spelled
// out in the text.
//
// NOTE: there is deliberately no `default:` case, so adding a new HloOpcode
// produces a compile error here until it is classified.
bool CanInferShape(HloOpcode code) {
  switch (code) {
    // Shape is fully determined by operand shapes (plus parsed attributes).
    case HloOpcode::kAbs:
    case HloOpcode::kAdd:
    case HloOpcode::kAddDependency:
    case HloOpcode::kAfterAll:
    case HloOpcode::kAtan2:
    case HloOpcode::kBatchNormGrad:
    case HloOpcode::kBatchNormInference:
    case HloOpcode::kBatchNormTraining:
    case HloOpcode::kBroadcast:
    case HloOpcode::kCall:
    case HloOpcode::kCeil:
    case HloOpcode::kCholesky:
    case HloOpcode::kClamp:
    case HloOpcode::kClz:
    case HloOpcode::kCompare:
    case HloOpcode::kComplex:
    case HloOpcode::kConcatenate:
    case HloOpcode::kConditional:
    case HloOpcode::kConvolution:
    case HloOpcode::kCopy:
    case HloOpcode::kCos:
    case HloOpcode::kOptimizationBarrier:
    case HloOpcode::kDivide:
    case HloOpcode::kDomain:
    case HloOpcode::kDot:
    case HloOpcode::kErf:
    case HloOpcode::kExp:
    case HloOpcode::kExpm1:
    case HloOpcode::kFft:
    case HloOpcode::kFloor:
    case HloOpcode::kGather:
    case HloOpcode::kGetDimensionSize:
    case HloOpcode::kSetDimensionSize:
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kImag:
    case HloOpcode::kIsFinite:
    case HloOpcode::kLog:
    case HloOpcode::kLog1p:
    case HloOpcode::kLogistic:
    case HloOpcode::kAnd:
    case HloOpcode::kNot:
    case HloOpcode::kOr:
    case HloOpcode::kXor:
    case HloOpcode::kMap:
    case HloOpcode::kMaximum:
    case HloOpcode::kMinimum:
    case HloOpcode::kMultiply:
    case HloOpcode::kNegate:
    case HloOpcode::kPad:
    case HloOpcode::kPartitionId:
    case HloOpcode::kPopulationCount:
    case HloOpcode::kPower:
    case HloOpcode::kReal:
    case HloOpcode::kReduce:
    case HloOpcode::kRemainder:
    case HloOpcode::kReplicaId:
    case HloOpcode::kReverse:
    case HloOpcode::kRoundNearestAfz:
    case HloOpcode::kRoundNearestEven:
    case HloOpcode::kRsqrt:
    case HloOpcode::kScatter:
    case HloOpcode::kSelect:
    case HloOpcode::kShiftLeft:
    case HloOpcode::kShiftRightArithmetic:
    case HloOpcode::kShiftRightLogical:
    case HloOpcode::kSign:
    case HloOpcode::kSin:
    case HloOpcode::kSqrt:
    case HloOpcode::kCbrt:
    case HloOpcode::kReduceWindow:
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kSort:
    case HloOpcode::kSubtract:
    case HloOpcode::kTan:
    case HloOpcode::kTanh:
    case HloOpcode::kTranspose:
    case HloOpcode::kTriangularSolve:
    case HloOpcode::kTuple:
    case HloOpcode::kWhile:
    case HloOpcode::kTopK:
      return true;
    // Shape cannot be inferred from operands alone; the HLO text must state
    // the shape explicitly (e.g. constants, parameters, slices, converts,
    // collectives whose shape depends on attributes).
    case HloOpcode::kAsyncStart:
    case HloOpcode::kAsyncUpdate:
    case HloOpcode::kAsyncDone:
    case HloOpcode::kAllGather:
    case HloOpcode::kAllGatherStart:
    case HloOpcode::kAllGatherDone:
    case HloOpcode::kAllReduce:
    case HloOpcode::kAllReduceStart:
    case HloOpcode::kAllReduceDone:
    case HloOpcode::kAllToAll:
    case HloOpcode::kCollectiveBroadcast:
    case HloOpcode::kCollectivePermute:
    case HloOpcode::kCollectivePermuteStart:
    case HloOpcode::kCollectivePermuteDone:
    case HloOpcode::kCopyDone:
    case HloOpcode::kCopyStart:
    case HloOpcode::kDynamicReshape:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kRecv:
    case HloOpcode::kRecvDone:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kSend:
    case HloOpcode::kSendDone:
    case HloOpcode::kSlice:
    case HloOpcode::kBitcast:
    case HloOpcode::kBitcastConvert:
    case HloOpcode::kConstant:
    case HloOpcode::kConvert:
    case HloOpcode::kCustomCall:
    case HloOpcode::kFusion:
    case HloOpcode::kInfeed:
    case HloOpcode::kIota:
    case HloOpcode::kOutfeed:
    case HloOpcode::kParameter:
    case HloOpcode::kReducePrecision:
    case HloOpcode::kReshape:
    case HloOpcode::kRng:
    case HloOpcode::kRngBitGenerator:
    case HloOpcode::kRngGetAndUpdateState:
    case HloOpcode::kStochasticConvert:
      return false;
  }
}
// Recursive-descent parser for the HLO text format. An instance wraps a
// lexer over the input string; callers invoke Run() to parse a whole module
// (or a single instruction), or one of the Parse*Only() entry points to
// parse a single construct. Errors are accumulated in `error_` and surfaced
// via GetError().
class HloParserImpl : public HloParser {
 public:
  using LocTy = HloLexer::LocTy;
  using BoolList = absl::InlinedVector<bool, 1>;
  explicit HloParserImpl(absl::string_view str) : lexer_(str) {}
  // Parses the full input as an HloModule (or as a single instruction
  // wrapped in a module). Returns an error status containing the
  // accumulated parse errors on failure.
  absl::Status Run(HloModule* module) override;
  // Returns all accumulated error messages, newline-separated.
  std::string GetError() const { return StrJoin(error_, "\n"); }
  // Stand-alone entry points: each parses exactly one construct from the
  // input text.
  absl::StatusOr<Shape> ParseShapeOnly();
  absl::StatusOr<Layout> ParseLayoutOnly();
  absl::StatusOr<HloSharding> ParseShardingOnly();
  absl::StatusOr<FrontendAttributes> ParseFrontendAttributesOnly();
  absl::StatusOr<StatisticsViz> ParseStatisticsVizOnly();
  absl::StatusOr<std::vector<bool>> ParseParameterReplicationOnly();
  absl::StatusOr<BoolList> ParseBooleanListOrSingleBooleanOnly();
  absl::StatusOr<Window> ParseWindowOnly();
  absl::StatusOr<ConvolutionDimensionNumbers>
  ParseConvolutionDimensionNumbersOnly();
  absl::StatusOr<PaddingConfig> ParsePaddingConfigOnly();
  absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly();
 private:
  // The value kinds an instruction attribute may take; drives the generic
  // attribute-parsing machinery below (see AttrConfig / ParseAttributes).
  enum class AttrTy {
    kBool,
    kInt64,
    kInt32,
    kFloat,
    kString,
    kLiteral,
    kBracedInt64List,
    kBracedInt64ListList,
    kHloComputation,
    kBracedHloComputationList,
    kFftType,
    kPaddingType,
    kComparisonDirection,
    kComparisonType,
    kWindow,
    kConvolutionDimensionNumbers,
    kSharding,
    kFrontendAttributes,
    kStatisticsViz,
    kBracedBoolListOrBool,
    kParameterReplication,
    kInstructionList,
    kSliceRanges,
    kPaddingConfig,
    kMetadata,
    kFusionKind,
    kDistribution,
    kDomain,
    kPrecisionList,
    kShape,
    kShapeList,
    kEnum,
    kRandomAlgorithm,
    kPrecisionAlgorithm,
    kAliasing,
    kBufferDonor,
    kComputationLayout,
    kInstructionAliasing,
    kCustomCallSchedule,
    kCustomCallApiVersion,
    kSparsityDescriptor,
    kStringOrJsonDict,
  };
  // Declares one expected attribute: whether it is mandatory, its value
  // kind, and a type-erased pointer to the destination for the parsed value
  // (whose concrete type is implied by `attr_type`).
  struct AttrConfig {
    bool required;
    AttrTy attr_type;
    void* result;
  };
  // Maps an instruction name to the instruction and the source location of
  // its definition; one table exists per lexical scope (computation).
  using InstrNameTable =
      absl::flat_hash_map<std::string, std::pair<HloInstruction*, LocTy>>;
  // The name table of the innermost scope currently being parsed.
  InstrNameTable& current_name_table() { return scoped_name_tables_.back(); }
  // Looks `name` up in the current scope (see the out-of-line definition
  // for the missing-instruction fallback and shape-compatibility checks).
  std::pair<HloInstruction*, LocTy>* FindInstruction(
      const std::string& name, const optional<Shape>& shape = nullopt);
  // Top-level parse drivers.
  bool ParseSingleInstruction(HloModule* module);
  bool ParseHloModule(HloModule* module,
                      bool parse_module_without_header = false);
  bool ParseComputations(HloModule* module);
  bool ParseComputation(HloComputation** entry_computation);
  bool ParseInstructionList(HloComputation** computation,
                            const std::string& computation_name);
  bool ParseInstruction(HloComputation::Builder* builder,
                        std::string* root_name);
  bool ParseInstructionRhs(HloComputation::Builder* builder, std::string name,
                           LocTy name_loc, bool allow_attributes = true);
  bool ParseControlPredecessors(HloInstruction* instruction);
  // Literal parsing. The shape-less overload reads the shape from the text.
  bool ParseLiteral(Literal* literal);
  bool ParseLiteral(Literal* literal, const Shape& shape);
  bool ParseTupleLiteral(Literal* literal, const Shape& shape);
  bool ParseNonTupleLiteral(Literal* literal, const Shape& shape);
  bool ParseDenseLiteral(Literal* literal, const Shape& shape);
  // Builds an instruction of `opcode` from already-parsed pieces; returns
  // nullptr on error.
  HloInstruction* CreateInstruction(
      HloComputation::Builder* builder, absl::string_view name,
      std::optional<Shape> shape, HloOpcode opcode,
      std::optional<HloOpcode> async_wrapped_opcode,
      absl::flat_hash_map<std::string, AttrConfig>& attrs,
      bool allow_attributes,
      std::vector<HloInstruction*>* preset_operands = nullptr);
  // Writes one parsed element into `literal` at the flat `index`, with
  // range checking against the literal's element type.
  bool SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
                         Literal* literal);
  bool SetValueInLiteral(LocTy loc, double value, int64_t index,
                         Literal* literal);
  bool SetValueInLiteral(LocTy loc, bool value, int64_t index,
                         Literal* literal);
  bool SetValueInLiteral(LocTy loc, std::complex<double> value, int64_t index,
                         Literal* literal);
  template <typename LiteralNativeT, typename ParsedElemT>
  bool SetValueInLiteralHelper(LocTy loc, ParsedElemT value, int64_t index,
                               Literal* literal);
  // Checks that a parsed value is representable in LiteralNativeT.
  template <typename LiteralNativeT, typename ParsedElemT>
  bool CheckParsedValueIsInRange(LocTy loc, ParsedElemT value);
  template <typename LiteralNativeT>
  bool CheckParsedValueIsInRange(LocTy loc, std::complex<double> value);
  // Parses an operand list; the second overload enforces an exact count.
  bool ParseOperands(std::vector<HloInstruction*>* operands,
                     HloComputation::Builder* builder);
  bool ParseOperands(std::vector<HloInstruction*>* operands,
                     HloComputation::Builder* builder, int expected_size);
  // Parsed representation of a slice attribute: parallel start/limit/stride
  // lists, one entry per dimension.
  struct SliceRanges {
    std::vector<int64_t> starts;
    std::vector<int64_t> limits;
    std::vector<int64_t> strides;
  };
  // Entry/exit metadata of a parsed `domain` attribute.
  struct DomainData {
    std::unique_ptr<DomainMetadata> entry_metadata;
    std::unique_ptr<DomainMetadata> exit_metadata;
  };
  // Generic attribute parsing driven by AttrConfig maps.
  bool ParseAttributes(
      const absl::flat_hash_map<std::string, AttrConfig>& attrs,
      bool allow_attributes = true);
  bool ParseSubAttributes(
      const absl::flat_hash_map<std::string, AttrConfig>& attrs);
  bool ParseAttributeHelper(
      const absl::flat_hash_map<std::string, AttrConfig>& attrs,
      absl::flat_hash_set<std::string>* seen_attrs);
  bool CopyAttributeToProtoMessage(
      absl::flat_hash_set<std::string> non_proto_attrs,
      const absl::flat_hash_map<std::string, AttrConfig>& attrs,
      tsl::protobuf::Message* message);
  bool ParseAttributesAsProtoMessage(
      const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
      tsl::protobuf::Message* message);
  // Parsers for individual attribute payloads and sub-constructs.
  bool ParseComputationName(HloComputation** value);
  bool ParseInstructionNames(std::vector<HloInstruction*>* instructions);
  bool ParseWindow(Window* window, bool expect_outer_curlies);
  bool ParseConvolutionDimensionNumbers(ConvolutionDimensionNumbers* dnums);
  bool ParsePaddingConfig(PaddingConfig* padding);
  bool ParseMetadata(OpMetadata* metadata);
  bool ParseSingleOrListMetadata(
      tsl::protobuf::RepeatedPtrField<OpMetadata>* metadata);
  bool ParseOpShardingType(OpSharding::Type* type);
  bool ParseListShardingType(std::vector<OpSharding::Type>* types);
  bool ParseSharding(OpSharding* sharding);
  bool ParseFrontendAttributes(FrontendAttributes* frontend_attributes);
  bool ParseStatisticsViz(StatisticsViz* statistics_viz);
  bool ParseSingleSharding(OpSharding* sharding, bool lbrace_pre_lexed);
  bool ParseParameterReplication(ParameterReplication* parameter_replication);
  bool ParseBooleanListOrSingleBoolean(BoolList* boolean_list);
  bool ParseReplicaGroupsOnly(std::vector<ReplicaGroup>* replica_groups);
  bool ParseDomain(DomainData* domain);
  bool ParseDxD(const std::string& name, std::vector<int64_t>* result);
  bool ParseWindowPad(std::vector<std::vector<int64_t>>* pad);
  bool ParseSliceRanges(SliceRanges* result);
  bool ParsePrecisionList(std::vector<PrecisionConfig::Precision>* result);
  bool ParseHloComputation(HloComputation** result);
  bool ParseHloComputationList(std::vector<HloComputation*>* result);
  bool ParseShapeList(std::vector<Shape>* result);
  bool ParseInt64List(TokKind start, TokKind end, TokKind delim,
                      std::vector<int64_t>* result);
  bool ParseInt64ListList(TokKind start, TokKind end, TokKind delim,
                          std::vector<std::vector<int64_t>>* result);
  // Generic delimited-list parser: consumes `start`, repeatedly invokes
  // `parse_and_add_item` separated by `delim`, then consumes `end`.
  bool ParseList(TokKind start, TokKind end, TokKind delim,
                 absl::FunctionRef<bool()> parse_and_add_item);
  bool ParseParamListToShape(Shape* shape, LocTy* shape_loc);
  bool ParseParamList();
  bool ParseName(std::string* result);
  bool ParseAttributeName(std::string* result);
  bool ParseString(std::string* result);
  bool ParseJsonDict(std::string* result);
  bool ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
                           std::vector<bool>* dynamic_dimensions);
  bool ParseShape(Shape* result);
  bool ParseLayout(Layout* layout);
  bool ParseLayoutIntAttribute(int64_t* attr_value,
                               absl::string_view attr_description);
  bool ParseDimLevelTypes(
      absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
      absl::InlinedVector<bool, InlineRank()>* dim_unique,
      absl::InlinedVector<bool, InlineRank()>* dim_ordered);
  bool ParseTiles(std::vector<Tile>* tiles);
  bool ParseSplitConfigs(std::vector<SplitConfig>& split_configs);
  bool ParsePhysicalShape(Shape* physical_shape);
  bool ParseOpcode(HloOpcode* opcode,
                   std::optional<HloOpcode>* async_wrapped_opcode);
  bool ParseFftType(FftType* result);
  bool ParsePaddingType(PaddingType* result);
  bool ParsePrimitiveType(PrimitiveType* result);
  bool ParseComparisonDirection(ComparisonDirection* result);
  bool ParseComparisonType(Comparison::Type* result);
  bool ParseFusionKind(HloInstruction::FusionKind* result);
  bool ParseRandomDistribution(RandomDistribution* result);
  bool ParseRandomAlgorithm(RandomAlgorithm* result);
  bool ParsePrecision(PrecisionConfig::Precision* result);
  bool ParseAlgorithm(PrecisionConfig::Algorithm* result);
  // Scalar-token parsers.
  bool ParseInt64(int64_t* result);
  bool ParseDouble(double* result);
  bool ParseComplex(std::complex<double>* result);
  bool ParseBool(bool* result);
  // Consumes a token of `kind`, reporting `msg` on mismatch.
  bool ParseToken(TokKind kind, const std::string& msg);
  bool ParseUnsignedIntegerType(PrimitiveType* primitive_type);
  // Aliasing / buffer-donor attribute payload types.
  using AliasingData =
      absl::flat_hash_map<ShapeIndex, HloInputOutputAliasConfig::Alias>;
  using BufferDonor = absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>;
  bool ParseAliasing(AliasingData* data);
  bool ParseBufferDonor(BufferDonor* data);
  bool ParseComputationLayout(ComputationLayout* computation_layout);
  bool ParseInstructionOutputOperandAliasing(
      std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
          aliasing_output_operand_pairs);
  bool ParseCustomCallSchedule(CustomCallSchedule* result);
  bool ParseCustomCallApiVersion(CustomCallApiVersion* result);
  bool ParseSparsityDescriptor(std::vector<SparsityDescriptor>* result);
  bool ParseShapeIndex(ShapeIndex* out);
  // Lookahead predicates: do not consume tokens.
  bool CanBeShape();
  bool CanBeParamListToShape();
  // Error reporting: both record a message in error_ and return false so
  // they can be used directly in `return` statements.
  bool TokenError(absl::string_view msg);
  bool Error(LocTy loc, absl::string_view msg);
  // Consumes a token of `kind` if it is next; returns whether it did.
  bool EatIfPresent(TokKind kind);
  // Registers a parsed instruction/computation name, detecting duplicates.
  bool AddInstruction(const std::string& name, HloInstruction* instruction,
                      LocTy name_loc);
  bool AddComputation(const std::string& name, HloComputation* computation,
                      LocTy name_loc);
  HloLexer lexer_;
  // Stack of per-scope instruction name tables; back() is the innermost.
  std::vector<InstrNameTable> scoped_name_tables_;
  // RAII helper that pushes a fresh name table for a nested scope and pops
  // it on destruction.
  class Scope {
   public:
    explicit Scope(std::vector<InstrNameTable>* scoped_name_tables)
        : scoped_name_tables_(scoped_name_tables) {
      scoped_name_tables_->emplace_back();
    }
    ~Scope() { scoped_name_tables_->pop_back(); }
   private:
    std::vector<InstrNameTable>* scoped_name_tables_;
  };
  // Computations parsed so far, by name (with definition location).
  absl::flat_hash_map<std::string, std::pair<HloComputation*, LocTy>>
      computation_pool_;
  // Owns the parsed computations until they are handed to the module.
  std::vector<std::unique_ptr<HloComputation>> computations_;
  // Accumulated error messages; see GetError().
  std::vector<std::string> error_;
  // Optional hook used when parsing a single instruction: creates a missing
  // operand (as a parameter) given its name and shape.
  std::function<std::pair<HloInstruction*, LocTy>*(const std::string& name,
                                                   const Shape& shape)>
      create_missing_instruction_;
  // Deduplicates generated names.
  NameUniquer name_uniquer_{"."};
};
// Splits `s` on `delim` and parses every piece as an int64_t, appending the
// parsed values to `out`.
//
// Returns false if any piece fails to parse. Fix: the previous version
// appended values as it went, so a failed call left `out` holding a partial
// prefix of the input; we now parse into a temporary and only touch `out`
// once the whole string has parsed successfully.
bool SplitToInt64s(absl::string_view s, char delim, std::vector<int64_t>* out) {
  std::vector<int64_t> parsed;
  for (const absl::string_view piece : absl::StrSplit(s, delim)) {
    int64_t val;
    if (!absl::SimpleAtoi(piece, &val)) {
      return false;  // Leave `out` unmodified on malformed input.
    }
    parsed.push_back(val);
  }
  out->insert(out->end(), parsed.begin(), parsed.end());
  return true;
}
// Converts a list of replica-id lists into the corresponding ReplicaGroup
// protos, one group per inner list, preserving order.
std::vector<ReplicaGroup> CreateReplicaGroups(
    absl::Span<const std::vector<int64_t>> groups) {
  std::vector<ReplicaGroup> replica_groups;
  replica_groups.reserve(groups.size());
  for (const std::vector<int64_t>& ids : groups) {
    ReplicaGroup group;
    for (int64_t id : ids) {
      group.add_replica_ids(id);
    }
    replica_groups.push_back(std::move(group));
  }
  return replica_groups;
}
// Records a parse error at `loc`, formatted as the message followed by the
// offending source line and a caret marking the column. Always returns
// false so callers can `return Error(...)`.
bool HloParserImpl::Error(LocTy loc, absl::string_view msg) {
  const auto [line, col] = lexer_.GetLineAndColumn(loc);
  std::vector<std::string> pieces;
  pieces.push_back(StrCat("was parsing ", line, ":", col, ": error: ", msg));
  pieces.emplace_back(lexer_.GetLine(loc));
  // Point a caret at the error column (no caret when the column is unknown).
  pieces.push_back(col == 0 ? "" : StrCat(std::string(col - 1, ' '), "^"));
  error_.push_back(StrJoin(pieces, "\n"));
  VLOG(1) << "Error: " << error_.back();
  return false;
}
// Reports `msg` as an error at the lexer's current position. Returns false.
bool HloParserImpl::TokenError(absl::string_view msg) {
  const LocTy current_loc = lexer_.GetLoc();
  return Error(current_loc, msg);
}
// Parses the input into `module`. Accepts either full module syntax
// (optionally without the `HloModule` header) or a single bare instruction,
// which is wrapped into a module. Returns InvalidArgument with the
// accumulated errors on failure.
absl::Status HloParserImpl::Run(HloModule* module) {
  lexer_.Lex();
  if ((lexer_.GetKind() == TokKind::kw_HloModule) ||
      (lexer_.GetKind() == TokKind::kw_ENTRY) ||
      (lexer_.LookAhead() == TokKind::kLbrace)) {
    // Module syntax: starts with `HloModule`, `ENTRY`, or a computation
    // whose name is followed by '{'. The header is absent exactly when the
    // first token is not `HloModule`. (Idiom fix: direct comparison instead
    // of `cond ? false : true`.)
    bool parse_module_without_header =
        lexer_.GetKind() != TokKind::kw_HloModule;
    if (!ParseHloModule(module, parse_module_without_header)) {
      return InvalidArgument(
          "Syntax error when trying to parse the text as a HloModule:\n%s",
          GetError());
    }
    return absl::OkStatus();
  }
  // Otherwise treat the input as one stand-alone instruction.
  if (!ParseSingleInstruction(module)) {
    return InvalidArgument(
        "Syntax error when trying to parse the text as a single "
        "HloInstruction:\n%s",
        GetError());
  }
  return absl::OkStatus();
}
// Looks up the instruction named `name` in the innermost scope's name
// table.
//
// If it is not found, we are at the outermost scope, and a
// create_missing_instruction_ hook is installed (single-instruction-module
// parsing), the hook is invoked to synthesize the operand — which requires
// the operand's shape to have been spelled out in the text.
//
// If `shape` is provided and an existing instruction is found, its shape
// must be compatible with `shape`. Returns nullptr (after recording an
// error) on any failure.
std::pair<HloInstruction*, HloParserImpl::LocTy>*
HloParserImpl::FindInstruction(const std::string& name,
                               const optional<Shape>& shape) {
  std::pair<HloInstruction*, LocTy>* instr = nullptr;
  if (!name.empty()) {
    instr = tsl::gtl::FindOrNull(current_name_table(), name);
  }
  // Only the outermost scope may create missing instructions; names inside
  // nested computations must already be declared.
  if (instr == nullptr && create_missing_instruction_ != nullptr &&
      scoped_name_tables_.size() == 1) {
    if (!shape.has_value()) {
      Error(lexer_.GetLoc(),
            "Operand had no shape in HLO text; cannot create parameter for "
            "single-instruction module.");
      return nullptr;
    }
    return create_missing_instruction_(name, *shape);
  }
  // Cross-check the declared operand shape against the actual instruction.
  if (instr != nullptr && shape.has_value() &&
      !ShapeUtil::Compatible(instr->first->shape(), shape.value())) {
    Error(
        lexer_.GetLoc(),
        StrCat("The declared operand shape ",
               ShapeUtil::HumanStringWithLayout(shape.value()),
               " is not compatible with the shape of the operand instruction ",
               ShapeUtil::HumanStringWithLayout(instr->first->shape()), "."));
    return nullptr;
  }
  return instr;
}
// Parses a brace-delimited, comma-separated list of integers into a
// ShapeIndex, e.g. "{1, 2}". Returns false (with an error recorded) on
// malformed input.
bool HloParserImpl::ParseShapeIndex(ShapeIndex* out) {
  if (!ParseToken(TokKind::kLbrace, "Expects '{' at the start of ShapeIndex")) {
    return false;
  }
  std::vector<int64_t> indices;
  while (lexer_.GetKind() != TokKind::kRbrace) {
    int64_t index;
    if (!ParseInt64(&index)) {
      return false;
    }
    indices.push_back(index);
    // No comma means the list is done; the closing brace is checked next.
    if (!EatIfPresent(TokKind::kComma)) {
      break;
    }
  }
  if (!ParseToken(TokKind::kRbrace, "Expects '}' at the end of ShapeIndex")) {
    return false;
  }
  *out = ShapeIndex(indices.begin(), indices.end());
  return true;
}
// Parses an input/output aliasing description of the form
//   { <output_shape_index>: (<param>, <param_shape_index>[, kind]), ... }
// where the optional kind is `must-alias` or `may-alias` (default).
//
// Fixes: the return values of ParseInt64 and ParseName were previously
// ignored, so malformed input could read an uninitialized param number; and
// the bad-kind error message referred to "SYSTEM or USER" instead of the
// kinds actually accepted.
bool HloParserImpl::ParseAliasing(AliasingData* data) {
  if (!ParseToken(TokKind::kLbrace,
                  "Expects '{' at the start of aliasing description")) {
    return false;
  }
  while (lexer_.GetKind() != TokKind::kRbrace) {
    ShapeIndex out;
    if (!ParseShapeIndex(&out)) {
      return false;
    }
    std::string errmsg =
        "Expected format: <output_shape_index>: (<input_param>, "
        "<input_param_shape_index>) OR <output_shape_index>: <input_param>";
    if (!ParseToken(TokKind::kColon, errmsg)) {
      return false;
    }
    if (!ParseToken(TokKind::kLparen, errmsg)) {
      return false;
    }
    int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;  // Was ignored before: param_num would be uninitialized.
    }
    if (!ParseToken(TokKind::kComma, errmsg)) {
      return false;
    }
    ShapeIndex param_idx;
    if (!ParseShapeIndex(&param_idx)) {
      return false;
    }
    // The aliasing kind is optional and defaults to may-alias.
    HloInputOutputAliasConfig::AliasKind alias_kind =
        HloInputOutputAliasConfig::kMayAlias;
    if (EatIfPresent(TokKind::kComma)) {
      std::string type;
      if (!ParseName(&type)) {
        return false;  // Was ignored before.
      }
      if (type == "must-alias") {
        alias_kind = HloInputOutputAliasConfig::kMustAlias;
      } else if (type == "may-alias") {
        alias_kind = HloInputOutputAliasConfig::kMayAlias;
      } else {
        return TokenError(
            "Unexpected aliasing kind; expected must-alias or may-alias");
      }
    }
    data->emplace(std::piecewise_construct, std::forward_as_tuple(out),
                  std::forward_as_tuple(param_num, param_idx, alias_kind));
    if (!ParseToken(TokKind::kRparen, errmsg)) {
      return false;
    }
    if (!EatIfPresent(TokKind::kComma)) {
      break;
    }
  }
  if (!ParseToken(TokKind::kRbrace,
                  "Expects '}' at the end of aliasing description")) {
    return false;
  }
  return true;
}
// Parses a buffer-donor description of the form
//   { (<input_param>, <input_param_shape_index>), ... }
//
// Fix: the return value of ParseInt64 was previously ignored, so a
// malformed parameter number left `param_num` uninitialized yet still
// inserted into `data`.
bool HloParserImpl::ParseBufferDonor(BufferDonor* data) {
  if (!ParseToken(TokKind::kLbrace,
                  "Expects '{' at the start of buffer donor description")) {
    return false;
  }
  std::string errmsg =
      "Expected format: (<input_param>, <input_param_shape_index>)";
  while (lexer_.GetKind() != TokKind::kRbrace) {
    if (!ParseToken(TokKind::kLparen, errmsg)) {
      return false;
    }
    int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;  // Was ignored before: param_num would be uninitialized.
    }
    if (!ParseToken(TokKind::kComma, errmsg)) {
      return false;
    }
    ShapeIndex param_idx;
    if (!ParseShapeIndex(&param_idx)) {
      return false;
    }
    if (!ParseToken(TokKind::kRparen, errmsg)) {
      return false;
    }
    data->emplace(param_num, param_idx);
    if (!EatIfPresent(TokKind::kComma)) {
      break;
    }
  }
  if (!ParseToken(TokKind::kRbrace,
                  "Expects '}' at the end of buffer donor description")) {
    return false;
  }
  return true;
}
bool HloParserImpl::ParseComputationLayout(
ComputationL | #include "xla/service/hlo_parser.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_frontend_attributes.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::absl::string_view;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
// One parameterized parser test case. Instances are aggregate-initialized
// positionally by the test tables below, so field order matters.
struct TestData {
  // Used as the gtest parameterized-test name (see TestDataToString).
  std::string test_name;
  // HLO module text fed to the parser.
  std::string module_string;
  // NOTE(review): presumably feeds the module config's replica count and
  // verifier toggle in the test fixture — confirm against the fixture code.
  int64_t replica_count = 1;
  bool enable_verification = true;
};
// gtest name generator: names each parameterized test instance after its
// TestData::test_name.
std::string TestDataToString(const ::testing::TestParamInfo<TestData>& data) {
  const TestData& test_case = data.param;
  return test_case.test_name;
}
// A test case whose printed output is expected to differ from its input
// text: `input_module_string` is parsed and the result is compared against
// `output_module_string`. Aggregate-initialized positionally.
struct NonRoundtripTestData {
  // Used as the gtest parameterized-test name.
  std::string test_name;
  std::string input_module_string;
  std::string output_module_string;
};
// gtest name generator for the non-roundtrip cases: names each instance
// after its NonRoundtripTestData::test_name.
std::string NonRoundtripTestDataToString(
    const ::testing::TestParamInfo<NonRoundtripTestData>& data) {
  const NonRoundtripTestData& test_case = data.param;
  return test_case.test_name;
}
std::vector<TestData> CreateTestCases() {
return std::vector<TestData>({
{
"AxpyParam",
R"(HloModule axpy_module, entry_computation_layout={(f32[], f32[2,4]{1,0}, f32[2,4]{1,0})->f32[2,4]{1,0}}
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)"
},
{
"ParamReplication",
R"(HloModule param_replication_module, entry_computation_layout={(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))->(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))}
ENTRY %param_replication (a: f32[], b: (f32[2,4], (f32[2,4]))) -> (f32[], (f32[2,4], (f32[2,4]))) {
%a = f32[] parameter(0), parameter_replication={true}
%b = (f32[2,4]{1,0}, (f32[2,4]{1,0})) parameter(1), parameter_replication={false,true}
ROOT %tuple = (f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0}))) tuple(f32[] %a, (f32[2,4]{1,0}, (f32[2,4]{1,0})) %b)
}
)"
},
{
"ConstantPred",
R"(HloModule constant_pred_module, entry_computation_layout={()->pred[]}
ENTRY %constant_pred () -> pred[] {
ROOT %constant = pred[] constant(true), metadata={op_type="const" op_name="\"it\'s not a problem\n" source_file="path/to/test.cc" source_line=68}, backend_config="foo\" bar"
}
)"
},
{
"ConstantPredArray",
R"(HloModule module, entry_computation_layout={()->pred[2,3]{1,0}}
ENTRY %constant_pred_array () -> pred[2,3] {
ROOT %constant = pred[2,3]{1,0} constant({ { 0, 1, 0 }, { 1, 0, 1 } })
}
)"
},
{
"ConstantS32",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42)
}
)"
},
{
"ConstantS32WithStatistics",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42), statistics={visualizing_index=1,stat-1=33,stat-2=44}
}
)"
},
{
"ConstantF32",
R"(HloModule ConstantF32_module, entry_computation_layout={()->f32[]}
ENTRY %ConstantF32.v4 () -> f32[] {
ROOT %constant = f32[] constant(42), backend_config="this is a configuration"
}
)"
},
{
"ConstantF32R1Empty",
R"(HloModule ConstantF32Empty_module, entry_computation_layout={()->f32[0]{0}}
ENTRY %ConstantF32Empty.v4 () -> f32[0] {
ROOT %constant = f32[0]{0} constant({})
}
)"
},
{
"ConstantF32R4Empty",
R"(HloModule ConstantF32R4Empty_module, entry_computation_layout={()->f32[2,0,4,3]{3,2,1,0}}
ENTRY %ConstantF32R4Empty.v4 () -> f32[2,0,4,3] {
ROOT %constant = f32[2,0,4,3]{3,2,1,0} constant({ { }, { } })
}
)"
},
{
"Constant4D",
R"(HloModule Small_3x2x1x1_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
ENTRY %Small_3x2x1x1.v1 () -> f32[3,2,1,1] {
ROOT %constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
}
)"
},
{
"ConstantNonFinite",
R"(HloModule IsFiniteR1F32s_module, entry_computation_layout={()->pred[6]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> pred[6] {
%constant = f32[6]{0} constant({nan, 7, nan, -1, inf, -inf})
ROOT %is-finite = pred[6]{0} is-finite(f32[6]{0} %constant)
}
)"
},
{
"ConstantNonFiniteE4M3",
R"(HloModule ConstantR1F8E4M3FNs_module, entry_computation_layout={()->f8e4m3fn[3]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3fn[3] {
ROOT %constant = f8e4m3fn[3]{0} constant({nan, 7, -nan})
}
)"
},
{
"ConstantNonFiniteE4M3B11",
R"(HloModule ConstantR1F8E4M3B11_module, entry_computation_layout={()->f8e4m3b11fnuz[2]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3b11fnuz[2] {
ROOT %constant = f8e4m3b11fnuz[2]{0} constant({-nan, 7})
}
)"
},
{
"ConstantF16",
R"(HloModule ConstantF16_module, entry_computation_layout={()->f16[]}
ENTRY %ConstantF16.v4 () -> f16[] {
ROOT %constant = f16[] constant(500)
}
)"
},
{
"BF16",
R"(HloModule BF16, entry_computation_layout={()->bf16[]}
ENTRY %BF16.v4 () -> bf16[] {
ROOT %constant = bf16[] constant(500)
}
)"
},
{
"AddConstants",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY %add_constants () -> f32[] {
%constant = f32[] constant(3.14)
ROOT %add = f32[] add(f32[] %constant, f32[] %constant)
}
)"
},
{
"TupleConstant",
R"(HloModule TupleConstant_module, entry_computation_layout={()->(f32[2,1]{1,0}, f32[2]{0})}
ENTRY %TupleConstant.v1 () -> (f32[2,1], f32[2]) {
ROOT %constant = (f32[2,1]{1,0}, f32[2]{0}) constant(( { {1}, {2} }, {2, 42} ))
}
)"
},
{
"SelectR1F32",
R"(HloModule SelectR1F32WithCmpR1F32sFromParamsSmall_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
ENTRY %SelectR1F32WithCmpR1F32sFromParamsSmall.v4 (v1: f32[4], v2: f32[4]) -> f32[4] {
%v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
%v2 = f32[4]{0} parameter(1), sharding={maximal device=1}
%greater-than = pred[4]{0} compare(f32[4]{0} %v1, f32[4]{0} %v2), direction=GT, type=TOTALORDER, sharding={replicated}
ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding={replicated}
}
)"
},
{
"EmptyTupleCreate",
R"(HloModule EmptyTupleCreate_module, entry_computation_layout={()->()}
ENTRY %EmptyTupleCreate.v1 () -> () {
ROOT %tuple = () tuple()
}
)"
},
{
"TupleCreate",
R"(HloModule TupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)"
},
{
"LargeTupleRoundTrip",
R"(HloModule LargeTupleRoundTrip_module, entry_computation_layout={(f32[])->(f32[], f32[], f32[], f32[], f32[], f32[])}
ENTRY %TupleCreate.v4 (v: f32[]) -> (f32[], f32[], f32[], f32[], f32[], f32[]) {
%v = f32[] parameter(0)
ROOT %tuple = (f32[], f32[], f32[], f32[], f32[], f32[]) tuple(f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v)
}
)"
},
{
"ShardedTupleCreate",
R"(HloModule ShardedTupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %ShardedTupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0), sharding={manual}
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{manual}, {maximal device=0}, {replicated}}
}
)"
},
{
"DomainParsing",
R"(HloModule DomainParsing_module, entry_computation_layout={(f32[])->f32[]}
ENTRY %DomainParsing (v1: f32[]) -> f32[] {
%v1 = f32[] parameter(0)
ROOT %dom = f32[] domain(f32[] %v1), domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
}
)"
},
{
"WhileWithScalarS32Result",
R"(HloModule WhileWithScalarS32Result_module, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)"
},
{
"CopyStartAndCopyDone",
R"(HloModule CopyStartAndCopyDone_module, entry_computation_layout={(f32[], f32[2,3]{1,0:S(1)})->(f32[], f32[2,3]{1,0:S(2)})}
ENTRY %CopyStartAndCopyDone (v1: f32[], v2: f32[2,3]) -> (f32[], f32[2,3]) {
%v1 = f32[] parameter(0)
%copy-start.1 = (f32[], f32[], u32[]) copy-start(f32[] %v1), cross_program_prefetch_index=0
%copy-done.1 = f32[] copy-done((f32[], f32[], u32[]) %copy-start.1)
%v2 = f32[2,3]{1,0:S(1)} parameter(1)
%copy-start.2 = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(f32[2,3]{1,0:S(1)} %v2)
%copy-done.2 = f32[2,3]{1,0:S(2)} copy-done((f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) %copy-start.2)
ROOT %tuple = (f32[], f32[2,3]{1,0:S(2)}) tuple(f32[] %copy-done.1, f32[2,3]{1,0:S(2)} %copy-done.2)
}
)"
},
{
"SendRecv",
R"(HloModule TwoSendRecvBothWayRecvFist_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, sharding={{maximal device=1}, {replicated}, {replicated}}
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={{maximal device=1}, {replicated}}
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, sharding={{maximal device=1}, {replicated}, {replicated}}, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal device=0}
}
)"
},
{
"SendRecvWithHostTransfer",
R"(HloModule HostTransferSendRecv_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)"
},
{
"GetTupleElement",
R"(HloModule GetTupleElement_module, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY %GetTupleElement.v4 () -> s32[2,3] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} %constant, s32[2,3]{1,0} %constant.1)
ROOT %get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) %tuple), index=1, sharding={maximal device=0}
}
)"
},
{
"Call",
R"(HloModule CallR0F32IdentityScalar_module, entry_computation_layout={()->f32[]}
%Identity.v1 (x: f32[]) -> f32[] {
ROOT %x = f32[] parameter(0)
}
ENTRY %CallR0F32IdentityScalar.v2 () -> f32[] {
%constant = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant), to_apply=%Identity.v1
}
)"
},
{
"CustomCallWithOpaque",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config="this string is opaque"
}
)"
},
{
"CustomCallWithBackendConfigInCurlyBraces",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config={key: "value"}
}
)"
},
{
"CustomCallWithLiteral",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=s32[2]{0} {1, 2}
}
)"
},
{
"CustomCallWithLiteralTuple",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=( s32[4]{0} {4, 128, 128, 3}, pred[4]{0} {1, 0, 0, 0} )
}
)"
},
{
"CustomCallWithLiteralR0",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=f32[] 0.1
}
)"
},
{
"ReduceWindow",
R"(HloModule R4UnitWindow_module, entry_computation_layout={(f32[13,12,8,15]{0,3,2,1})->f32[13,3,8,15]{0,3,2,1}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindow.v3 (operand: f32[13,12,8,15]) -> f32[13,3,8,15] {
%operand = f32[13,12,8,15]{0,3,2,1} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[13,3,8,15]{0,3,2,1} reduce-window(f32[13,12,8,15]{0,3,2,1} %operand, f32[] %constant), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowScalar",
R"(HloModule reduce_window_scalar, entry_computation_layout={()->f32[]}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindowScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = f32[] reduce-window(f32[] %constant, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowVariadic",
R"(HloModule reduce_window_variadic, entry_computation_layout={()->(f32[], f32[])}
%add_F32.v3 (lhs1: f32[], lhs2: f32[], rhs1: f32[], rhs2: f32[]) -> (f32[], f32[]) {
%lhs1 = f32[] parameter(0)
%rhs1 = f32[] parameter(2)
%add1 = f32[] add(f32[] %lhs1, f32[] %rhs1)
%lhs2 = f32[] parameter(1)
%rhs2 = f32[] parameter(3)
%add2 = f32[] add(f32[] %lhs2, f32[] %rhs2)
ROOT %tuple1 = (f32[], f32[]) tuple(f32[] %add1, f32[] %add2)
}
ENTRY %R4UnitWindowScalar () -> (f32[], f32[]) {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = (f32[], f32[]) reduce-window(f32[] %constant, f32[] %constant, f32[] %constant.1, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"Convolution",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}
}
)"
},
{
"ConvolutionDynamic",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %custom-call.52 = f32[1,2,1]{2,0,1} custom-call(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}, custom_call_target="DynamicConvolutionForward", metadata={op_type="Conv2D" op_name="conv1d"}
}
)"
},
{
"ConvolutionR2",
R"(HloModule ConvolveR2_module, entry_computation_layout={(f32[1,2]{1,0}, f32[2,2]{1,0})->f32[1,2]{0,1}}
ENTRY %ConvolveR2.v3 (input: f32[1,2], filter: f32[2,2]) -> f32[1,2] {
%input = f32[1,2]{1,0} parameter(0)
%filter = f32[2,2]{1,0} parameter(1)
ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[2,2]{1,0} %filter), dim_labels=bf_io->bf
}
)"
},
{
"ConvolutionBackward",
R"(HloModule ConvolveBackward_module, entry_computation_layout={(f32[128,7,7,512]{0,3,2,1}, f32[3,3,512,512]{3,2,1,0})->f32[128,14,14,512]{0,3,2,1}}
ENTRY %ConvolveBackward (input: f32[128,7,7,512], filter: f32[3,3,512,512]) -> f32[128,14,14,512] {
%input = f32[128,7,7,512]{0,3,2,1} parameter(0)
%filter = f32[3,3,512,512]{3,2,1,0} parameter(1)
ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
}
)"
},
{
"Reverse4D",
R"(HloModule Reverse4DFloatArrayOnDim01_module, entry_computation_layout={()->f32[4,3,2,1]{0,1,2,3}}
ENTRY %Reverse4DFloatArrayOnDim01.v2 () -> f32[4,3,2,1] {
%constant = f32[4,3,2,1]{0,1,2,3} constant({ { { {1}, {2} }, { {3}, {4} }, { {5}, {6} } }, { { {7}, {8} }, { {9}, {10} }, { {11}, {12} } }, { { {13}, {14} }, { {15}, {16} }, { {17}, {18} } }, { { {19}, {20} }, { {21}, {22} }, { {23}, {24} } } })
ROOT %reverse = f32[4,3,2,1]{0,1,2,3} reverse(f32[4,3,2,1]{0,1,2,3} %constant), dimensions={0,1}
}
)"
},
{
"Concat",
R"(HloModule Concat2x3With2x5_module, entry_computation_layout={()->f32[2,8]{1,0}}
ENTRY %Concat2x3With2x5.v3 () -> f32[2,8] {
%constant = f32[2,3]{1,0} constant({ { 0, 1, 2 }, { 1000, 1001, 1002 } })
%constant.1 = f32[2,5]{1,0} constant({ { 64, 65, 66, 67, 68 }, { 1064, 1065, 1066, 1067, 1068 } })
ROOT %concatenate = f32[2,8]{1,0} concatenate(f32[2,3]{1,0} %constant, f32[2,5]{1,0} %constant.1), dimensions={1}
}
)"
},
{
"SelectAndScatter",
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"SelectAndScatterScalar",
R"(HloModule select_and_scatter_scalar, entry_computation_layout={()->f32[]}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %SelectAndScatterScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
ROOT %select-and-scatter = f32[] select-and-scatter(f32[] %constant, f32[] %constant.1, f32[] %constant.2), select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"Slice",
R"(HloModule slice_module, entry_computation_layout={(f32[3,3,4,4]{3,2,1,0})->f32[3,3,2,4]{3,2,1,0}}
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3:1], [0:3:1], [0:4:2], [0:4:1]}
}
)"
},
{
"SliceNoStride",
R"(HloModule Slice3x3x3_To_1x3x3_F32_module, entry_computation_layout={()->f32[1,3,3]{2,1,0}}
ENTRY %Slice3x3x3_To_1x3x3_F32.v2 () -> f32[1,3,3] {
%constant = f32[3,3,3]{2,1,0} constant({ { { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 } }, { { 9, 10, 11 }, { 12, 13, 14 }, { 15, 16, 17 } }, { { 18, 19, 20 }, { 21, 22, 23 }, { 24, 25, 26 } } })
ROOT %slice = f32[1,3,3]{2,1,0} slice(f32[3,3,3]{2,1,0} %constant), slice={[0:1], [0:3], [0:3]}
}
)"
},
{
"SliceR0",
R"(HloModule SliceR0_module, entry_computation_layout={()->s32[]}
ENTRY %SliceR0.v2 () -> s32[] {
%constant = s32[] constant(1)
ROOT %slice = s32[] slice(s32[] %constant), slice={}
}
)"
},
{
"Transpose",
R"(HloModule Transpose_module, entry_computation_layout={()->s32[1,2,3]{2,1,0}}
ENTRY %Transpose.v2 () -> s32[1,2,3] {
%constant = s32[1,2,3]{2,1,0} constant({ { { 1, 2, 3 }, { 4, 5, 6 } } })
ROOT %transpose = s32[1,2,3]{2,1,0} transpose(s32[1,2,3]{2,1,0} %constant), dimensions={0,1,2}
}
)"
},
{
"TransposeC128",
R"(HloModule TransposeC128_module, entry_computation_layout={(c128[1,2,3]{2,1,0})->c128[1,2,3]{2,1,0}}
ENTRY %Transpose.v3 (input: c128[1,2,3]) -> c128[1,2,3] {
%input = c128[1,2,3]{2,1,0} parameter(0)
ROOT %transpose = c128[1,2,3]{2,1,0} transpose(c128[1,2,3]{2,1,0} %input), dimensions={0,1,2}
}
)"
},
{
"TriangularSolve",
R"(HloModule TriangularSolve_module, entry_computation_layout={(f32[4,4]{1,0}, f32[3,4]{1,0})->f32[3,4]{1,0}}
ENTRY %SimpleRightLowerNotranspose.4 (a.1: f32[4,4], b.2: f32[3,4]) -> f32[3,4] {
%a.1 = f32[4,4]{1,0} parameter(0)
%b.2 = f32[3,4]{1,0} parameter(1)
ROOT %triangular-solve.3 = f32[3,4]{1,0} triangular-solve(f32[4,4]{1,0} %a.1, f32[3,4]{1,0} %b.2), lower=true, transpose_a=NO_TRANSPOSE
}
)"
},
{
"DynamicSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[1]{0})->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[1]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[1]{0} constant({0})
%start_index = s32[1]{0} parameter(1)
%concatenate = s32[3]{0} concatenate(s32[1]{0} %constant, s32[1]{0} %constant, s32[1]{0} %start_index), dimensions={0}
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[3]{0} %concatenate), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicSliceScalarIndices",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[])->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicUpdateSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[4]{0})->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_indices: s32[4]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_indices = s32[4]{0} parameter(2)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[4]{0} %start_indices)
}
)"
},
{
"DynamicUpdateSliceScalarIndex",
R"(HloModule DynamicUpdateSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[], s32[], s32[], s32[])->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)"
},
{
"BatchNormTraining",
R"(HloModule BasicTraining_module, entry_computation_layout={()->(f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BasicTraining.v4 () -> (f32[2,2,1,2], f32[2], f32[2]) {
%constant = f32[2,2,1,2]{3,2,1,0} constant({ { { { 1, 2 } }, { { 3, 4 } } }, { { { 5, 6 } }, { { 7, 8 } } } })
%constant.1 = f32[2]{0} constant({2, 3})
%constant.2 = f32[2]{0} constant({1, 2})
ROOT %batch-norm-training = (f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-training(f32[2,2,1,2]{3,2,1,0} %constant, f32[2]{0} %constant.1, f32[2]{0} %constant.2), epsilon=0.001, feature_index=3
}
)"
},
{
"BatchNormInference",
R"(HloModule BatchNormInference_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0})->f32[2,2,2,2]{3,2,1,0}}
ENTRY %BatchNormInference.v6 (input: f32[2,2,2,2], offset: f32[2], scale: f32[2], mean: f32[2], variance: f32[2]) -> f32[2,2,2,2] {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%offset = f32[2]{0} parameter(1)
%scale = f32[2]{0} parameter(2)
%mean = f32[2]{0} parameter(3)
%variance = f32[2]{0} parameter(4)
ROOT %batch-norm-inference = f32[2,2,2,2]{3,2,1,0} batch-norm-inference(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %offset, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance), epsilon=0.001, feature_index=0
}
)"
},
{
"BatchNormGrad",
R"(HloModule BatchNormGrad_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,2,2,2]{3,2,1,0})->(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BatchNormGrad.v4 (input: f32[2,2,2,2], scale: f32[2], mean: f32[2], variance: f32[2], grad_output: f32[2,2,2,2]) -> (f32[2,2,2,2], f32[2], f32[2]) {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%scale = f32[2]{0} parameter(1)
%mean = f32[2]{0} parameter(2)
%variance = f32[2]{0} parameter(3)
%grad_output = f32[2,2,2,2]{3,2,1,0} parameter(4)
ROOT %batch-norm-grad = (f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-grad(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance, f32[2,2,2,2]{3,2,1,0} %grad_output), epsilon=0.001, feature_index=0
}
)"
},
{
"Fft",
R"(HloModule Fft_module, entry_computation_layout={(c64[8,32]{1,0})->c64[8,32]{1,0}}
ENTRY %Fft (input: c64[8,32]) -> c64[8,32] {
%input = c64[8,32]{1,0} parameter(0)
ROOT %fft = c64[8,32]{1,0} fft(c64[8,32]{1,0} %input), fft_type=FFT, fft_length={32}
}
)"
},
{
"Ifft2d",
R"(HloModule Ifft2d_module, entry_computation_layout={(c64[5,8,32]{2,1,0})->c64[5,8,32]{2,1,0}}
ENTRY %Ifft2d (input: c64[5,8,32]) -> c64[5,8,32] {
%input = c64[5,8,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,8,32]{2,1,0} fft(c64[5,8,32]{2,1,0} %input), fft_type=IFFT, fft_length={8,32}
}
)"
},
{
"Rfft2d",
R"(HloModule Rfft2d_module, entry_computation_layout={(f32[5,64,32]{2,1,0})->c64[5,64,17]{2,1,0}}
ENTRY %Rfft2d (input: f32[5,64,32]) -> c64[5,64,17] {
%input = f32[5,64,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,64,17]{2,1,0} fft(f32[5,64,32]{2,1,0} %input), fft_type=RFFT, fft_length={64,32}
}
)"
},
{
"Irfft3d",
R"(HloModule Irfft3d_module, entry_computation_layout={(c64[5,64,128,33]{3,2,1,0})->f32[5,64,128,64]{3,2,1,0}}
ENTRY %Irfft3d (input: c64[5,64,128,33]) -> f32[5,64,128,64] {
%input = c64[5,64,128,33]{3,2,1,0} parameter(0)
ROOT %fft = f32[5,64,128,64]{3,2,1,0} fft(c64[5,64,128,33]{3,2,1,0} %input), fft_type=IRFFT, fft_length={64,128,64}
}
)"
},
{
"Pad",
R"(HloModule Pad1DS3Array_module, entry_computation_layout={()->f32[7]{0}}
ENTRY %Pad1DS3Array.v3 () -> f32[7] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = f32[] constant(0.1)
ROOT %pad = f32[7]{0} pad(f32[3]{0} %constant, f32[] %constant.1), padding=3_1
}
)"
},
{
"PadHasInterior",
R"(HloModule PadHasInterior_module, entry_computation_layout={(f32[1,25,7,7]{3,2,1,0})->f32[1,2 | std::pair<HloInstruction*, HloParserImpl::LocTy>*
HloParserImpl::FindInstruction(const std::string& name,
                               const optional<Shape>& shape) {
  // Look up `name` in the innermost (current) instruction name table.
  std::pair<HloInstruction*, LocTy>* instr = nullptr;
  if (!name.empty()) {
    instr = tsl::gtl::FindOrNull(current_name_table(), name);
  }
  // Not found: if a factory was provided and we are at the outermost scope
  // (single-instruction module parsing), synthesize the missing operand.
  // This requires the operand's shape to be spelled out in the HLO text.
  if (instr == nullptr && create_missing_instruction_ != nullptr &&
      scoped_name_tables_.size() == 1) {
    if (!shape.has_value()) {
      Error(lexer_.GetLoc(),
            "Operand had no shape in HLO text; cannot create parameter for "
            "single-instruction module.");
      return nullptr;
    }
    return create_missing_instruction_(name, *shape);
  }
  // Found: if the text also declared a shape, it must be compatible with the
  // shape of the instruction registered under this name.
  if (instr != nullptr && shape.has_value() &&
      !ShapeUtil::Compatible(instr->first->shape(), shape.value())) {
    Error(
        lexer_.GetLoc(),
        StrCat("The declared operand shape ",
               ShapeUtil::HumanStringWithLayout(shape.value()),
               " is not compatible with the shape of the operand instruction ",
               ShapeUtil::HumanStringWithLayout(instr->first->shape()), "."));
    return nullptr;
  }
  // May be nullptr when the name was not found and no factory applied.
  return instr;
}
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"

#include <algorithm>
#include <cstdint>
#include <set>
#include <unordered_map>

#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace grappler {
namespace {
// Copies the dimension sizes of `shape` into a BCast dimension vector.
BCast::Vec ShapeDims(const TensorShapeProto& shape) {
  BCast::Vec result;
  result.reserve(shape.dim_size());
  for (const auto& d : shape.dim()) result.push_back(d.size());
  return result;
}
}
// A dimension is known when its size is a concrete, non-negative value.
bool IsKnown(const TensorShapeProto::Dim& dim) {
  return dim.size() >= 0;
}
// A dimension is symbolically known when its size encodes a symbolic id,
// i.e. any value <= -2 (-1 is reserved for "unknown").
bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) {
  const int64_t size = dim.size();
  return size <= -2;
}
// A dimension is fully unknown when its size is the sentinel value -1.
bool IsUnknown(const TensorShapeProto::Dim& dim) {
  return dim.size() == -1;
}
bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape) {
return !shape.unknown_rank() &&
std::all_of(
shape.dim().begin(), shape.dim().end(),
[](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });
}
// Convenience overload: checks the shape carried by tensor properties.
bool ShapeIsSymbolicallyDefined(const OpInfo::TensorProperties& properties) {
  const TensorShapeProto& shape = properties.shape();
  return ShapeIsSymbolicallyDefined(shape);
}
// Returns the rank of `shape`, or -1 when the rank itself is unknown.
int Rank(const TensorShapeProto& shape) {
  return shape.unknown_rank() ? -1 : shape.dim_size();
}
// Returns the total number of elements in `shape`, or -1 when any dimension
// (or the rank) is not fully known.
int64_t NumCoefficients(const TensorShapeProto& shape) {
  if (shape.unknown_rank()) return -1;
  int64_t product = 1;
  for (const auto& dim : shape.dim()) {
    const int64_t size = dim.size();
    if (size < 0) return -1;  // Unknown or symbolic dimension.
    product *= size;
  }
  return product;
}
// Two shapes are symbolically equal when both ranks are known and equal, no
// dimension on either side is fully unknown, and each pair of corresponding
// dimension sizes (concrete or symbolic id) matches exactly.
bool ShapesSymbolicallyEqual(const TensorShapeProto& left,
                             const TensorShapeProto& right) {
  if (left.unknown_rank() || right.unknown_rank()) return false;
  const int rank = left.dim_size();
  if (rank != right.dim_size()) return false;
  for (int i = 0; i < rank; ++i) {
    const auto& lhs_dim = left.dim(i);
    const auto& rhs_dim = right.dim(i);
    if (IsUnknown(lhs_dim) || IsUnknown(rhs_dim)) return false;
    if (lhs_dim.size() != rhs_dim.size()) return false;
  }
  return true;
}
// Convenience overload: compares the shapes carried by tensor properties.
bool ShapesSymbolicallyEqual(const OpInfo::TensorProperties& left,
                             const OpInfo::TensorProperties& right) {
  const TensorShapeProto& lhs = left.shape();
  const TensorShapeProto& rhs = right.shape();
  return ShapesSymbolicallyEqual(lhs, rhs);
}
bool ShapesBroadcastable(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
false);
return bcast.IsValid();
}
// Convenience overload: checks broadcastability of the properties' shapes.
bool ShapesBroadcastable(const OpInfo::TensorProperties& left,
                         const OpInfo::TensorProperties& right) {
  const TensorShapeProto& lhs = left.shape();
  const TensorShapeProto& rhs = right.shape();
  return ShapesBroadcastable(lhs, rhs);
}
bool ShapeAfterBroadcast(const TensorShapeProto& left,
const TensorShapeProto& right,
TensorShapeProto* output_shape) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
false);
if (!bcast.IsValid()) {
return false;
}
output_shape->set_unknown_rank(false);
output_shape->clear_dim();
for (const auto& dim : bcast.output_shape()) {
output_shape->add_dim()->set_size(dim);
}
return true;
}
// Compares the total sizes of two symbolically shaped tensors.
//
// Fully-defined dimensions (> 0) are multiplied into a per-side "defined
// size". Symbolic dimensions (<= -2) are tallied per symbolic id, and each
// fully-unknown dimension (-1) is tallied under a fresh id so it can never
// cancel against anything on the other side. Symbolic ids occurring on both
// sides cancel out; what remains is "unbalanced".
//
// Returns true iff the size of `left` is provably strictly smaller than the
// size of `right`; returns false when the shapes are not comparable.
//
// Fix: the lambda previously used the deprecated `int64` alias for its
// parameters, inconsistent with the `int64_t` used everywhere else in this
// file (and with the types of the arguments actually passed in).
bool CompareSymbolicallyShapedTensorSizes(const TensorShapeProto& left,
                                          const TensorShapeProto& right) {
  if (left.unknown_rank() || right.unknown_rank()) {
    return false;
  }
  int64_t left_defined_size = 1;
  int64_t right_defined_size = 1;
  std::unordered_map<int64_t, int64_t> left_unknown_dims;
  std::unordered_map<int64_t, int64_t> right_unknown_dims;
  // Positive ids are handed out to -1 dimensions; symbolic dimensions keep
  // their (negative) id, so the two key spaces never collide.
  int64_t unknown_dim_id = 1;
  auto process_dimensions =
      [&unknown_dim_id](const TensorShapeProto& shape, int64_t* defined_size,
                        std::unordered_map<int64_t, int64_t>* unknown_dims) {
        for (int i = 0; i < shape.dim_size(); ++i) {
          const auto& dim = shape.dim(i);
          int64_t dim_size = dim.size();
          if (dim_size > 0) {
            *defined_size *= dim_size;
          } else if (IsUnknown(dim)) {
            ++(*unknown_dims)[unknown_dim_id++];
          } else if (IsKnownSymbolically(dim)) {
            ++(*unknown_dims)[dim_size];
          }
        }
      };
  process_dimensions(left, &left_defined_size, &left_unknown_dims);
  process_dimensions(right, &right_defined_size, &right_unknown_dims);
  // Cancel symbolic dimensions that occur on both sides.
  std::set<int64_t> unknown_dims;
  for (const auto& el : left_unknown_dims) unknown_dims.insert(el.first);
  for (const auto& el : right_unknown_dims) unknown_dims.insert(el.first);
  for (int64_t unknown_dim : unknown_dims) {
    int64_t co_occurrence = std::min(left_unknown_dims[unknown_dim],
                                     right_unknown_dims[unknown_dim]);
    left_unknown_dims[unknown_dim] -= co_occurrence;
    right_unknown_dims[unknown_dim] -= co_occurrence;
  }
  int64_t left_unbalanced_unknown_dims = 0;
  int64_t right_unbalanced_unknown_dims = 0;
  for (const auto& el : left_unknown_dims)
    left_unbalanced_unknown_dims += el.second;
  for (const auto& el : right_unknown_dims)
    right_unbalanced_unknown_dims += el.second;
  if (left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims == 0) {
    // All symbolic factors cancelled: compare the defined sizes directly.
    return left_defined_size < right_defined_size;
  }
  // `left` is fully defined while `right` carries extra symbolic factors,
  // which (presumably being >= 1 at runtime) can only make `right` larger.
  if (left_defined_size <= right_defined_size &&
      left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims > 0) {
    return true;
  }
  return false;
}
// Convenience overload: compares the sizes of the properties' shapes.
bool CompareSymbolicallyShapedTensorSizes(
    const OpInfo::TensorProperties& left,
    const OpInfo::TensorProperties& right) {
  const TensorShapeProto& lhs = left.shape();
  const TensorShapeProto& rhs = right.shape();
  return CompareSymbolicallyShapedTensorSizes(lhs, rhs);
}
// Computes size(numerator) / size(denominator) when the ratio is provable.
//
// Concrete dimensions are multiplied into `num` / `denom`; every symbolic
// dimension (size <= -2) in the numerator must be cancelled by a matching
// symbolic dimension in the denominator. Returns -1 when either rank is
// unknown, any dimension is fully unknown (-1), symbolic dimensions do not
// cancel exactly, or the denominator is zero.
//
// Fix: the multiset previously held `int`, silently narrowing the proto's
// int64 dimension sizes used as symbolic ids; it now holds `int64_t`.
int64_t ComputeSizeRatio(const TensorShapeProto& numerator,
                         const TensorShapeProto& denominator) {
  if (numerator.unknown_rank() || denominator.unknown_rank()) {
    return -1;
  }
  // Multiset: a symbolic id may legitimately appear more than once.
  std::multiset<int64_t> symbolic_dims;
  int64_t num = 1;
  for (const auto& dim : numerator.dim()) {
    if (dim.size() == -1) {
      return -1;  // Fully unknown dimension: ratio is undefined.
    } else if (dim.size() < -1) {
      symbolic_dims.insert(dim.size());
    } else {
      num *= dim.size();
    }
  }
  int64_t denom = 1;
  for (const auto& dim : denominator.dim()) {
    if (dim.size() == -1) {
      return -1;
    } else if (dim.size() < -1) {
      auto it = symbolic_dims.find(dim.size());
      if (it == symbolic_dims.end()) {
        return -1;  // Symbolic dimension with no numerator counterpart.
      }
      symbolic_dims.erase(it);
    } else {
      denom *= dim.size();
    }
  }
  if (denom == 0) {
    return -1;  // Guard against division by a zero-sized denominator.
  }
  if (!symbolic_dims.empty()) {
    return -1;  // Leftover symbolic dimensions in the numerator.
  }
  return num / denom;
}
}
} | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
// Test fixture providing helpers to build TensorShapeProto values. Negative
// dimension sizes follow the convention under test: -1 is unknown, <= -2 is
// a symbolic dimension id.
class SymbolicShapesTest : public ::testing::Test {
 protected:
  // Builds a shape proto whose rank is unknown.
  TensorShapeProto MakeUnknown() {
    TensorShapeProto shape;
    shape.set_unknown_rank(true);
    return shape;
  }
  // Builds a ranked shape proto from the given dimension sizes.
  TensorShapeProto MakeShape(std::vector<int> dims) {
    TensorShapeProto shape;
    for (int dim_size : dims) {
      shape.add_dim()->set_size(dim_size);
    }
    return shape;
  }
};
// Orders shapes by symbolically-compared tensor size, letting the tests
// below use natural `lhs < rhs` syntax.
bool operator<(const TensorShapeProto& lhs, const TensorShapeProto& rhs) {
  return CompareSymbolicallyShapedTensorSizes(lhs, rhs);
}
// A shape is symbolically defined iff its rank is known and no dimension is
// fully unknown (-1); symbolic dimensions (<= -2) are allowed.
TEST_F(SymbolicShapesTest, ShapeIsSymbolicallyDefined) {
  EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeUnknown()));
  EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeShape({-1, 2})));
  EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({1, 2})));
  EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({-2, 2})));
}
// Equality requires known ranks, no -1 dimensions, and exact matches of
// dimension sizes — including matching symbolic ids (-2 == -2, -2 != -3).
TEST_F(SymbolicShapesTest, ShapesSymbolicallyEqual) {
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeUnknown(), MakeUnknown()));
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-1, 2}), MakeShape({-1, 2})));
  EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-3, 2})));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), MakeShape({1, 2})));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-2, 2})));
}
// Broadcasting of symbolic shapes: distinct symbolic ids do not broadcast
// against each other, but a symbolic dimension broadcasts against 1, and the
// broadcasted output preserves the symbolic ids of the inputs.
TEST_F(SymbolicShapesTest, ShapesBroadcastable) {
  EXPECT_FALSE(ShapesBroadcastable(MakeUnknown(), MakeUnknown()));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2}), MakeShape({1, -3})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-1, 2}), MakeShape({-1, 2})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-3, 2})));
  EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 4}), MakeShape({-2, 8})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({1, 2}), MakeShape({1, 2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-2, 2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 32}), MakeShape({-2, 1})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -2})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -3})));
  EXPECT_TRUE(ShapesBroadcastable(MakeShape({-3}), MakeShape({-2, -3})));
  // ShapeAfterBroadcast should produce the same result that broadcasting
  // rules predict, with symbolic ids carried through to the output.
  TensorShapeProto output_shape;
  EXPECT_TRUE(
      ShapeAfterBroadcast(MakeShape({1, 2}), MakeShape({1, 2}), &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 2}), MakeShape({-2, 2}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 32}), MakeShape({-2, 1}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 32}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -2}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -2}), output_shape));
  EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -3}),
                                  &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
  EXPECT_TRUE(
      ShapeAfterBroadcast(MakeShape({-3}), MakeShape({-2, -3}), &output_shape));
  EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
}
// `lhs < rhs` (via the operator< above) holds only when lhs is provably
// smaller: matching symbolic ids cancel, unmatched ids on the right can only
// grow it, and any -1 dimension makes the comparison unprovable.
TEST_F(SymbolicShapesTest, CompareSymbolicallyShapedTensorSizes) {
  EXPECT_TRUE(MakeShape({1, 1, 32}) < MakeShape({32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({2048}));
  EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, 32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-2, 32, 32}));
  EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-1, 32, 32}));
  EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, -2, 32}));
  EXPECT_FALSE(MakeShape({1, -2, 32}) < MakeShape({-3, 32, 32}));
  EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({1, -1, 32}));
  EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({-1, -1, 32}));
  EXPECT_FALSE(MakeShape({-1, -1, 32}) < MakeShape({1, -1, 32}));
}
// Rank is defined whenever the rank is known (even with symbolic dims), but
// NumCoefficients requires every dimension to be concrete.
TEST_F(SymbolicShapesTest, RankAndNumCoeff) {
  EXPECT_EQ(2, Rank(MakeShape({32, 32})));
  EXPECT_EQ(32 * 32, NumCoefficients(MakeShape({32, 32})));
  EXPECT_EQ(2, Rank(MakeShape({-2, 32})));
  EXPECT_EQ(-1, NumCoefficients(MakeShape({-2, 32})));
  TensorShapeProto shape;
  shape.set_unknown_rank(true);
  EXPECT_EQ(-1, Rank(shape));
  EXPECT_EQ(-1, NumCoefficients(shape));
}
// The size ratio is computable only when symbolic dimensions cancel exactly
// between numerator and denominator; -1 dimensions or leftovers yield -1.
TEST_F(SymbolicShapesTest, SizeRatio) {
  EXPECT_EQ(16, ComputeSizeRatio(MakeShape({32, 32}), MakeShape({32, 2})));
  EXPECT_EQ(16, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, 2})));
  EXPECT_EQ(16,
            ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, -2})));
  EXPECT_EQ(-1,
            ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, 2})));
  EXPECT_EQ(-1,
            ComputeSizeRatio(MakeShape({-2, 2, 32}), MakeShape({-2, 2, -2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, -2}), MakeShape({-2, 2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, -2})));
  EXPECT_EQ(1, ComputeSizeRatio(MakeShape({-2, -3}), MakeShape({-3, -2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 2})));
  EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 0})));
}
}
}
} | bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape) {
return !shape.unknown_rank() &&
std::all_of(
shape.dim().begin(), shape.dim().end(),
[](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });
} | TEST_F(SymbolicShapesTest, ShapeIsSymbolicallyDefined) {
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeUnknown()));
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeShape({-1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({-2, 2})));
} |
#include "absl/strings/internal/cordz_info.h"
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/spinlock.h"
#include "absl/container/inlined_vector.h"
#include "absl/debugging/stacktrace.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
// Pre-C++17 toolchains need an out-of-line definition for the static
// constexpr member declared in the header.
constexpr size_t CordzInfo::kMaxStackDepth;
#endif
// Global list of sampled CordzInfo objects, constant-initialized so it is
// usable before dynamic initializers run.
ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};
class CordRepAnalyzer {
public:
  // Binds the statistics object that AnalyzeCordRep will populate; the
  // reference must outlive this analyzer.
  explicit CordRepAnalyzer(CordzStatistics& statistics)
      : statistics_(statistics) {}
void AnalyzeCordRep(const CordRep* rep) {
ABSL_ASSERT(rep != nullptr);
size_t refcount = rep->refcount.Get();
RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
if (repref.tag() == CRC) {
statistics_.node_count++;
statistics_.node_counts.crc++;
memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);
repref = repref.Child(repref.rep->crc()->child);
}
repref = CountLinearReps(repref, memory_usage_);
switch (repref.tag()) {
case CordRepKind::BTREE:
AnalyzeBtree(repref);
break;
default:
ABSL_ASSERT(repref.tag() == CordRepKind::UNUSED_0);
break;
}
statistics_.estimated_memory_usage += memory_usage_.total;
statistics_.estimated_fair_share_memory_usage +=
static_cast<size_t>(memory_usage_.fair_share);
}
private:
struct RepRef {
const CordRep* rep;
size_t refcount;
RepRef Child(const CordRep* child) const {
if (child == nullptr) return RepRef{nullptr, 0};
return RepRef{child, refcount * child->refcount.Get()};
}
constexpr CordRepKind tag() const {
ABSL_ASSERT(rep == nullptr || rep->tag != CordRepKind::UNUSED_0);
return rep ? static_cast<CordRepKind>(rep->tag) : CordRepKind::UNUSED_0;
}
};
struct MemoryUsage {
size_t total = 0;
double fair_share = 0.0;
void Add(size_t size, size_t refcount) {
total += size;
fair_share += static_cast<double>(size) / refcount;
}
};
void CountFlat(size_t size) {
statistics_.node_count++;
statistics_.node_counts.flat++;
if (size <= 64) {
statistics_.node_counts.flat_64++;
} else if (size <= 128) {
statistics_.node_counts.flat_128++;
} else if (size <= 256) {
statistics_.node_counts.flat_256++;
} else if (size <= 512) {
statistics_.node_counts.flat_512++;
} else if (size <= 1024) {
statistics_.node_counts.flat_1k++;
}
}
RepRef CountLinearReps(RepRef rep, MemoryUsage& memory_usage) {
while (rep.tag() == SUBSTRING) {
statistics_.node_count++;
statistics_.node_counts.substring++;
memory_usage.Add(sizeof(CordRepSubstring), rep.refcount);
rep = rep.Child(rep.rep->substring()->child);
}
if (rep.tag() >= FLAT) {
size_t size = rep.rep->flat()->AllocatedSize();
CountFlat(size);
memory_usage.Add(size, rep.refcount);
return RepRef{nullptr, 0};
}
if (rep.tag() == EXTERNAL) {
statistics_.node_count++;
statistics_.node_counts.external++;
size_t size = rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
memory_usage.Add(size, rep.refcount);
return RepRef{nullptr, 0};
}
return rep;
}
void AnalyzeBtree(RepRef rep) {
statistics_.node_count++;
statistics_.node_counts.btree++;
memory_usage_.Add(sizeof(CordRepBtree), rep.refcount);
const CordRepBtree* tree = rep.rep->btree();
if (tree->height() > 0) {
for (CordRep* edge : tree->Edges()) {
AnalyzeBtree(rep.Child(edge));
}
} else {
for (CordRep* edge : tree->Edges()) {
CountLinearReps(rep.Child(edge), memory_usage_);
}
}
}
CordzStatistics& statistics_;
MemoryUsage memory_usage_;
};
}
// Returns the first entry of the global sampled-cord list, or nullptr when
// the list is empty. An active CordzSnapshot is required so the returned
// pointer stays safe to inspect.
CordzInfo* CordzInfo::Head(const CordzSnapshot& snapshot) {
  ABSL_ASSERT(snapshot.is_snapshot());
  CordzInfo* const front = global_list_.head.load(std::memory_order_acquire);
  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(front));
  return front;
}
// Returns the entry after this one on the global list, or nullptr. Requires
// an active CordzSnapshot guaranteeing both handles remain inspectable.
CordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {
  ABSL_ASSERT(snapshot.is_snapshot());
  CordzInfo* const successor = ci_next_.load(std::memory_order_acquire);
  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(this));
  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(successor));
  return successor;
}
// Starts sampling `cord`: creates a CordzInfo for its tree (with no parent)
// and links it into the global sampled-cord list. The cord must hold a tree
// and must not already be profiled.
void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,
                          int64_t sampling_stride) {
  assert(cord.is_tree());
  assert(!cord.is_profiled());
  CordzInfo* const info =
      new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);
  cord.set_cordz_info(info);
  info->Track();
}
// Re-samples `cord` as a copy of the sampled cord `src`: any existing
// sampling info on `cord` is dropped, and a new CordzInfo inheriting src's
// stack and sampling stride is attached and tracked.
void CordzInfo::TrackCord(InlineData& cord, const InlineData& src,
                          MethodIdentifier method) {
  assert(cord.is_tree());
  assert(src.is_tree());
  // Stop tracking the destination's current info before replacing it.
  if (CordzInfo* old_info = cord.cordz_info()) {
    old_info->Untrack();
  }
  CordzInfo* const info =
      new CordzInfo(cord.as_tree(), src.cordz_info(), method,
                    src.cordz_info()->sampling_stride());
  cord.set_cordz_info(info);
  info->Track();
}
// Propagates sampling on copy: a sampled source makes the destination
// sampled too; otherwise any sampling on the destination is removed.
void CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,
                                   MethodIdentifier method) {
  if (src.is_profiled()) {
    TrackCord(cord, src, method);
    return;
  }
  if (cord.is_profiled()) {
    // Source is not sampled: drop sampling from the destination.
    cord.cordz_info()->Untrack();
    cord.clear_cordz_info();
  }
}
// Returns the method to record as this cord's parent method: src's own
// parent method when known, else src's method, else kUnknown for no parent.
CordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {
  if (src == nullptr) return MethodIdentifier::kUnknown;
  if (src->parent_method_ != MethodIdentifier::kUnknown) {
    return src->parent_method_;
  }
  return src->method_;
}
// Copies the parent stack for a child cord into `stack` and returns its
// depth. Prefers src's recorded parent stack when it has one, otherwise
// uses src's own capture stack; a null src yields depth 0.
size_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {
  assert(stack);
  if (src == nullptr) return 0;
  const bool use_parent = src->parent_stack_depth_ != 0;
  void* const* frames = use_parent ? src->parent_stack_ : src->stack_;
  const size_t depth =
      use_parent ? src->parent_stack_depth_ : src->stack_depth_;
  memcpy(stack, frames, depth * sizeof(void*));
  return depth;
}
// Captures the sampling context for a newly tracked cord: its tree, the
// current stack trace, the parent cord's stack/method (when `src` is given),
// the creation time and the sampling stride.
CordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,
                     MethodIdentifier method, int64_t sampling_stride)
    : rep_(rep),
      stack_depth_(
          static_cast<size_t>(absl::GetStackTrace(stack_,
                                                  kMaxStackDepth,
                                                  1))),  // skip this frame
      parent_stack_depth_(FillParentStack(src, parent_stack_)),
      method_(method),
      parent_method_(GetParentMethod(src)),
      create_time_(absl::Now()),
      sampling_stride_(sampling_stride) {
  update_tracker_.LossyAdd(method);
  if (src) {
    // Fold the parent's update history into this cord's tracker.
    update_tracker_.LossyAdd(src->update_tracker_);
  }
}
// If rep_ is still set — e.g. when Untrack() deferred deletion via the
// delete queue and took an extra reference — release that reference.
CordzInfo::~CordzInfo() {
  if (ABSL_PREDICT_FALSE(rep_)) {
    CordRep::Unref(rep_);
  }
}
// Pushes this CordzInfo onto the front of the global sampled-cord list,
// under the list spinlock.
void CordzInfo::Track() {
  SpinLockHolder l(&list_->mutex);
  CordzInfo* const head = list_->head.load(std::memory_order_acquire);
  if (head != nullptr) {
    // Link the old head back to the new front entry.
    head->ci_prev_.store(this, std::memory_order_release);
  }
  ci_next_.store(head, std::memory_order_release);
  list_->head.store(this, std::memory_order_release);
}
// Removes this CordzInfo from the global sampled list and deletes it —
// either directly, or via the CordzHandle delete queue when it may still be
// inspected by concurrent diagnostics.
void CordzInfo::Untrack() {
  ODRCheck();
  {
    // Unlink from the doubly-linked global list under the list spinlock.
    SpinLockHolder l(&list_->mutex);
    CordzInfo* const head = list_->head.load(std::memory_order_acquire);
    CordzInfo* const next = ci_next_.load(std::memory_order_acquire);
    CordzInfo* const prev = ci_prev_.load(std::memory_order_acquire);
    if (next) {
      ABSL_ASSERT(next->ci_prev_.load(std::memory_order_acquire) == this);
      next->ci_prev_.store(prev, std::memory_order_release);
    }
    if (prev) {
      ABSL_ASSERT(head != this);
      ABSL_ASSERT(prev->ci_next_.load(std::memory_order_acquire) == this);
      prev->ci_next_.store(next, std::memory_order_release);
    } else {
      // No predecessor: this entry was the list head.
      ABSL_ASSERT(head == this);
      list_->head.store(next, std::memory_order_release);
    }
  }
  // Fast path: delete immediately when safe to do so.
  if (SafeToDelete()) {
    UnsafeSetCordRep(nullptr);
    delete this;
    return;
  }
  {
    // Slow path: take a reference on rep_ (released in ~CordzInfo) so the
    // tree stays inspectable while this instance sits on the delete queue.
    absl::MutexLock lock(&mutex_);
    if (rep_) CordRep::Ref(rep_);
  }
  CordzHandle::Delete(this);
}
// Acquires mutex_ and records `method` in the update tracker. The cord's
// tree (rep_) must still be set when locking.
void CordzInfo::Lock(MethodIdentifier method)
    ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) {
  mutex_.Lock();
  update_tracker_.LossyAdd(method);
  assert(rep_);
}
// Releases mutex_. If the tree was cleared while locked (rep_ set to null),
// the cord is no longer sampled and is untracked here, outside the lock.
void CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) {
  const bool still_tracked = rep_ != nullptr;
  mutex_.Unlock();
  if (!still_tracked) {
    Untrack();
  }
}
// Read-only view of the stack captured when this cord was sampled.
absl::Span<void* const> CordzInfo::GetStack() const {
  return absl::Span<void* const>(stack_, stack_depth_);
}
// Read-only view of the parent cord's stack; empty when there was no parent
// (FillParentStack returned 0).
absl::Span<void* const> CordzInfo::GetParentStack() const {
  return absl::Span<void* const>(parent_stack_, parent_stack_depth_);
}
// Builds a CordzStatistics snapshot for this cord. Takes a reference on the
// tree so analysis runs on a stable rep, then releases it.
CordzStatistics CordzInfo::GetCordzStatistics() const {
  CordzStatistics stats;
  stats.method = method_;
  stats.parent_method = parent_method_;
  stats.update_tracker = update_tracker_;
  if (CordRep* rep = RefCordRep()) {
    stats.size = rep->length;
    CordRepAnalyzer analyzer(stats);
    analyzer.AnalyzeCordRep(rep);
    CordRep::Unref(rep);
  }
  return stats;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cordz_info.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/strings/cordz_test_helpers.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Ne;
using ::testing::SizeIs;
// Method identifiers reused throughout the tests below.
auto constexpr kUnknownMethod = CordzUpdateTracker::kUnknown;
auto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;
auto constexpr kChildMethod = CordzUpdateTracker::kConstructorCord;
auto constexpr kUpdateMethod = CordzUpdateTracker::kAppendString;
// Returns the current CordzHandle delete queue for inspection.
std::vector<const CordzHandle*> DeleteQueue() {
  return CordzHandle::DiagnosticsGetDeleteQueue();
}
// Symbolizes a raw stack trace into a newline-separated string; frames that
// cannot be symbolized are omitted.
std::string FormatStack(absl::Span<void* const> raw_stack) {
  static constexpr size_t buf_size = 1 << 14;
  std::unique_ptr<char[]> buf(new char[buf_size]);
  std::string output;
  for (void* stackp : raw_stack) {
    if (absl::Symbolize(stackp, buf.get(), buf_size)) {
      absl::StrAppend(&output, " ", buf.get(), "\n");
    }
  }
  return output;
}
// TrackCord attaches a CordzInfo holding the cord's tree and places it at
// the head of the global sampled-cord list.
TEST(CordzInfoTest, TrackCord) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  ASSERT_THAT(info, Ne(nullptr));
  EXPECT_FALSE(info->is_snapshot());
  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
  EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
  info->Untrack();
}
// MaybeTrackCord samples the destination only when the source cord is
// sampled; an unsampled source leaves the copy unsampled.
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSampling) {
  CordzSamplingIntervalHelper sample_none(99999);
  TestCordData parent, child;
  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
// Even with an aggressive sampling interval, copying from an unsampled
// source does not sample the copy.
TEST(CordzInfoTest, MaybeTrackChildCordWithSampling) {
  CordzSamplingIntervalHelper sample_all(1);
  TestCordData parent, child;
  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
// A sampled parent propagates sampling — including its stack — to the copy.
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {
  CordzSamplingIntervalHelper sample_none(99999);
  TestCordData parent, child;
  CordzInfo::TrackCord(parent.data, kTrackCordMethod, 1);
  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
  CordzInfo* parent_info = parent.data.cordz_info();
  CordzInfo* child_info = child.data.cordz_info();
  ASSERT_THAT(child_info, Ne(nullptr));
  EXPECT_THAT(child_info->GetCordRepForTesting(), Eq(child.rep.rep));
  EXPECT_THAT(child_info->GetParentStack(), parent_info->GetStack());
  parent_info->Untrack();
  child_info->Untrack();
}
// A previously sampled destination is untracked when assigned from an
// unsampled source.
TEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {
  CordzSamplingIntervalHelper sample_none(99999);
  TestCordData parent, child;
  CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
// Same as above: the ambient sampling interval does not keep the
// destination sampled when the source is unsampled.
TEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {
  CordzSamplingIntervalHelper sample_all(1);
  TestCordData parent, child;
  CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);
  CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);
  EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));
}
// Untrack with no active snapshot deletes the info immediately: nothing is
// parked on the delete queue.
TEST(CordzInfoTest, UntrackCord) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  info->Untrack();
  EXPECT_THAT(DeleteQueue(), SizeIs(0u));
}
// With a live snapshot, Untrack removes the info from the global list but
// parks it on the delete queue with its rep still inspectable.
TEST(CordzInfoTest, UntrackCordWithSnapshot) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  CordzSnapshot snapshot;
  info->Untrack();
  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
  EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
  EXPECT_THAT(DeleteQueue(), ElementsAre(info, &snapshot));
}
// SetCordRep under Lock/Unlock swaps the tracked tree.
TEST(CordzInfoTest, SetCordRep) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  TestCordRep rep;
  info->Lock(CordzUpdateTracker::kAppendCord);
  info->SetCordRep(rep.rep);
  info->Unlock();
  EXPECT_THAT(info->GetCordRepForTesting(), Eq(rep.rep));
  info->Untrack();
}
// Clearing the rep while locked untracks the cord on Unlock (see
// CordzInfo::Unlock), removing it from the global list.
TEST(CordzInfoTest, SetCordRepNullUntracksCordOnUnlock) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  info->Lock(CordzUpdateTracker::kAppendString);
  info->SetCordRep(nullptr);
  EXPECT_THAT(info->GetCordRepForTesting(), Eq(nullptr));
  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
  info->Unlock();
  EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));
}
// RefCordRep returns the tracked tree with an extra reference added.
TEST(CordzInfoTest, RefCordRep) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  size_t refcount = data.rep.rep->refcount.Get();
  EXPECT_THAT(info->RefCordRep(), Eq(data.rep.rep));
  EXPECT_THAT(data.rep.rep->refcount.Get(), Eq(refcount + 1));
  CordRep::Unref(data.rep.rep);
  info->Untrack();
}
#if GTEST_HAS_DEATH_TEST
// SetCordRep without holding the mutex must assert in debug builds.
TEST(CordzInfoTest, SetCordRepRequiresMutex) {
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  TestCordRep rep;
  EXPECT_DEBUG_DEATH(info->SetCordRep(rep.rep), ".*");
  info->Untrack();
}
#endif
// Newly tracked cords are prepended; untracking the head re-links the list
// correctly (head-first removal order).
TEST(CordzInfoTest, TrackUntrackHeadFirstV2) {
  CordzSnapshot snapshot;
  EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info1 = data.data.cordz_info();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
  TestCordData data2;
  CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
  CordzInfo* info2 = data2.data.cordz_info();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
  EXPECT_THAT(info2->Next(snapshot), Eq(info1));
  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
  info2->Untrack();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
  info1->Untrack();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
}
// Same list invariants when removing the tail entry first.
TEST(CordzInfoTest, TrackUntrackTailFirstV2) {
  CordzSnapshot snapshot;
  EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
  TestCordData data;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info1 = data.data.cordz_info();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));
  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
  TestCordData data2;
  CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);
  CordzInfo* info2 = data2.data.cordz_info();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
  EXPECT_THAT(info2->Next(snapshot), Eq(info1));
  EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));
  info1->Untrack();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));
  EXPECT_THAT(info2->Next(snapshot), Eq(nullptr));
  info2->Untrack();
  ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));
}
// The stack recorded by TrackCord should contain the frames captured at the
// call site (compared via symbolized substrings, as depths may differ).
TEST(CordzInfoTest, StackV2) {
  TestCordData data;
  static constexpr int kMaxStackDepth = 50;
  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
  CordzInfo* info = data.data.cordz_info();
  std::vector<void*> local_stack;
  local_stack.resize(kMaxStackDepth);
  local_stack.resize(static_cast<size_t>(
      absl::GetStackTrace(local_stack.data(), kMaxStackDepth,
                          1)));
  std::string got_stack = FormatStack(info->GetStack());
  std::string expected_stack = FormatStack(local_stack);
  EXPECT_THAT(got_stack, HasSubstr(expected_stack));
  info->Untrack();
}
// Helper: samples `data` as a child copy of `parent`.
CordzInfo* TrackChildCord(InlineData& data, const InlineData& parent) {
  CordzInfo::TrackCord(data, parent, kChildMethod);
  return data.cordz_info();
}
// Helper: samples `data` as a standalone (parent) cord.
CordzInfo* TrackParentCord(InlineData& data) {
  CordzInfo::TrackCord(data, kTrackCordMethod, 1);
  return data.cordz_info();
}
// GetCordzStatistics reports size, methods and update counts for a cord
// sampled without a parent.
TEST(CordzInfoTest, GetStatistics) {
  TestCordData data;
  CordzInfo* info = TrackParentCord(data.data);
  CordzStatistics statistics = info->GetCordzStatistics();
  EXPECT_THAT(statistics.size, Eq(data.rep.rep->length));
  EXPECT_THAT(statistics.method, Eq(kTrackCordMethod));
  EXPECT_THAT(statistics.parent_method, Eq(kUnknownMethod));
  EXPECT_THAT(statistics.update_tracker.Value(kTrackCordMethod), Eq(1));
  info->Untrack();
}
// Each Lock() call records its method in the update tracker.
TEST(CordzInfoTest, LockCountsMethod) {
  TestCordData data;
  CordzInfo* info = TrackParentCord(data.data);
  info->Lock(kUpdateMethod);
  info->Unlock();
  info->Lock(kUpdateMethod);
  info->Unlock();
  CordzStatistics statistics = info->GetCordzStatistics();
  EXPECT_THAT(statistics.update_tracker.Value(kUpdateMethod), Eq(2));
  info->Untrack();
}
// A child cord inherits the parent's stack and records the parent's method.
TEST(CordzInfoTest, FromParent) {
  TestCordData parent;
  TestCordData child;
  CordzInfo* info_parent = TrackParentCord(parent.data);
  CordzInfo* info_child = TrackChildCord(child.data, parent.data);
  std::string stack = FormatStack(info_parent->GetStack());
  std::string parent_stack = FormatStack(info_child->GetParentStack());
  EXPECT_THAT(stack, Eq(parent_stack));
  CordzStatistics statistics = info_child->GetCordzStatistics();
  EXPECT_THAT(statistics.size, Eq(child.rep.rep->length));
  EXPECT_THAT(statistics.method, Eq(kChildMethod));
  EXPECT_THAT(statistics.parent_method, Eq(kTrackCordMethod));
  EXPECT_THAT(statistics.update_tracker.Value(kChildMethod), Eq(1));
  info_parent->Untrack();
  info_child->Untrack();
}
}
}
ABSL_NAMESPACE_END
} | void CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,
int64_t sampling_stride) {
assert(cord.is_tree());
assert(!cord.is_profiled());
CordzInfo* cordz_info =
new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);
cord.set_cordz_info(cordz_info);
cordz_info->Track();
} | TEST(CordzInfoTest, TrackCord) {
TestCordData data;
CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);
CordzInfo* info = data.data.cordz_info();
ASSERT_THAT(info, Ne(nullptr));
EXPECT_FALSE(info->is_snapshot());
EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));
EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));
info->Untrack();
} |
#ifndef THIRD_PARTY_CEL_CPP_COMMON_SIZED_INPUT_VIEW_H_
#define THIRD_PARTY_CEL_CPP_COMMON_SIZED_INPUT_VIEW_H_
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
namespace cel {
template <typename T>
class SizedInputView;
namespace sized_input_view_internal {
// Identity transformer: perfectly forwards its argument unchanged,
// preserving value category.
struct ForwardingTransformer {
  template <typename T>
  decltype(auto) operator()(T&& value) const {
    return std::forward<T>(value);
  }
};
// Iterator type obtained from a const view of container C.
template <typename C>
using ConstIterator =
    std::decay_t<decltype(std::begin(std::declval<const C&>()))>;
// Size type reported by std::size for container C.
template <typename C>
using SizeType = std::decay_t<decltype(std::size(std::declval<const C&>()))>;
// True if dereferencing Iter and applying Transformer yields something that
// can be exposed as a T: either a reference whose address converts to
// const T* (no stash needed), or a value convertible to T (needs a stash).
template <typename Transformer, typename T, typename Iter>
constexpr bool CanIterateAsType() {
  return std::is_convertible_v<
             std::add_pointer_t<decltype((std::declval<Transformer>())(
                 *std::declval<Iter>()))>,
             const T*> ||
         std::is_convertible_v<
             decltype((std::declval<Transformer>())(*std::declval<Iter>())), T>;
}
// Two pointers' worth of inline storage. Objects that fit (see
// IsStoredInline) are constructed in `small`; larger objects live on the
// heap and are referenced through `large`.
inline constexpr size_t kSmallSize = sizeof(void*) * 2;
union Storage {
  char small[kSmallSize];
  void* large;
};
// True when T fits in Storage's small buffer without requiring stricter
// alignment than Storage provides.
template <typename T>
constexpr bool IsStoredInline() {
  return sizeof(T) <= kSmallSize && alignof(T) <= alignof(Storage);
}
// Returns a typed pointer to the T held in `storage`, whether it lives in
// the inline buffer or behind the heap pointer.
template <typename T>
T* StorageCast(Storage& storage) {
  if constexpr (!IsStoredInline<T>()) {
    return static_cast<T*>(storage.large);
  } else {
    return std::launder(reinterpret_cast<T*>(&storage.small[0]));
  }
}
// Const overload of StorageCast above.
template <typename T>
const T* StorageCast(const Storage& storage) {
  if constexpr (!IsStoredInline<T>()) {
    return static_cast<const T*>(storage.large);
  } else {
    return std::launder(reinterpret_cast<const T*>(&storage.small[0]));
  }
}
// True when the transformed element is produced by value (its address is
// not convertible to const T*), so the iterator must stash a T in order to
// hand out stable pointers/references.
template <typename T, typename Iter, typename Transformer>
constexpr bool IsValueStashRequired() {
  return !std::is_convertible_v<
      std::add_pointer_t<decltype((std::declval<Transformer>())(
          *std::declval<Iter>()))>,
      const T*>;
}
// Raw, properly aligned byte buffers used when iterators, the transformer,
// or the stashed value do not fit in Storage's inline buffer. Member names
// are significant: CreateRangeStorage/DestroyRangeStorage locate slots via
// offsetof(StorageType, begin|end|transformer|value).
template <typename Iter>
struct LargeIteratorStorage {
  alignas(Iter) char begin[sizeof(Iter)];
  alignas(Iter) char end[sizeof(Iter)];
};
template <typename Transformer>
struct LargeTransformerStorage {
  // Named `transformer` (previously `value`): offsetof(StorageType,
  // transformer) in CreateRangeStorage/DestroyRangeStorage requires this
  // name, and `value` would clash ambiguously with LargeValueStorage::value
  // when both bases are inherited (LargeTransformerValueStorage, LargeStorage).
  alignas(Transformer) char transformer[sizeof(Transformer)];
};
template <typename Iter, typename Transformer>
struct LargeIteratorTransformerStorage : LargeIteratorStorage<Iter>,
                                         LargeTransformerStorage<Transformer> {
};
template <typename T>
struct LargeValueStorage {
  alignas(T) char value[sizeof(T)];
};
template <typename Iter, typename T>
struct LargeIteratorValueStorage : LargeIteratorStorage<Iter>,
                                   LargeValueStorage<T> {};
template <typename Transformer, typename T>
struct LargeTransformerValueStorage : LargeTransformerStorage<Transformer>,
                                      LargeValueStorage<T> {};
template <typename T, typename Iter, typename Transformer>
struct LargeStorage : LargeIteratorStorage<Iter>,
                      LargeTransformerStorage<Transformer>,
                      LargeValueStorage<T> {};
// Type-erased storage for an iterator pair, its transformer, and (when
// required) a stashed transformed value. Each slot is either inline or
// heap-backed depending on the concrete types; see CreateRangeStorage.
struct RangeStorage {
  Storage begin;
  Storage end;
  Storage transformer;
  Storage value_stash;
};
// Heap-allocates raw storage sized and aligned for T; the caller releases
// it with Deallocate<T>().
template <typename T>
char* Allocate() {
  constexpr auto kAlignment = static_cast<std::align_val_t>(alignof(T));
  void* const memory = ::operator new(sizeof(T), kAlignment);
  return static_cast<char*>(memory);
}
// Frees memory obtained from Allocate<T>(), using sized deallocation when
// the toolchain provides it.
template <typename T>
void Deallocate(void* address) {
#if defined(__cpp_sized_deallocation) && __cpp_sized_deallocation >= 201309L
  ::operator delete(address, sizeof(T),
                    static_cast<std::align_val_t>(alignof(T)));
#else
  ::operator delete(address, static_cast<std::align_val_t>(alignof(T)));
#endif
}
// Allocates any out-of-line storage slots needed for T / Iter / Transformer
// and points the corresponding range->*.large members into them. Slots whose
// type is inline-eligible (see IsStoredInline) use Storage::small and need
// nothing here. DestroyRangeStorage below mirrors the branch structure
// exactly so allocation and deallocation agree on StorageType.
template <typename T, typename Iter, typename Transformer>
void CreateRangeStorage(RangeStorage* range) {
  // A dedicated value stash slot is needed only when the transformed value
  // must be stored by value AND does not fit in the inline buffer.
  constexpr bool value_stash_required =
      IsValueStashRequired<T, Iter, Transformer>() && !IsStoredInline<T>();
  if constexpr (!value_stash_required && IsStoredInline<Iter>() &&
                IsStoredInline<Transformer>()) {
    // Everything is stored inline: no heap allocation required.
  } else if constexpr (!value_stash_required && !IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    using StorageType = LargeIteratorStorage<Iter>;
    auto* storage = Allocate<StorageType>();
    range->begin.large = storage + offsetof(StorageType, begin);
    range->end.large = storage + offsetof(StorageType, end);
  } else if constexpr (!value_stash_required && IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeTransformerStorage<Transformer>;
    auto* storage = Allocate<StorageType>();
    range->transformer.large = storage + offsetof(StorageType, transformer);
  } else if constexpr (!value_stash_required && !IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeIteratorTransformerStorage<Iter, Transformer>;
    auto* storage = Allocate<StorageType>();
    range->begin.large = storage + offsetof(StorageType, begin);
    range->end.large = storage + offsetof(StorageType, end);
    range->transformer.large = storage + offsetof(StorageType, transformer);
  } else if constexpr (value_stash_required && IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeTransformerValueStorage<Transformer, T>;
    auto* storage = Allocate<StorageType>();
    range->transformer.large = storage + offsetof(StorageType, transformer);
    range->value_stash.large = storage + offsetof(StorageType, value);
  } else if constexpr (value_stash_required && !IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    // Fixed: this previously instantiated LargeIteratorValueStorage
    // <Transformer, T>, which sized and aligned the begin/end buffers for
    // Transformer instead of Iter, overflowing them whenever
    // sizeof(Iter) > sizeof(Transformer).
    using StorageType = LargeIteratorValueStorage<Iter, T>;
    auto* storage = Allocate<StorageType>();
    range->begin.large = storage + offsetof(StorageType, begin);
    range->end.large = storage + offsetof(StorageType, end);
    range->value_stash.large = storage + offsetof(StorageType, value);
  } else if constexpr (value_stash_required && IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    using StorageType = LargeValueStorage<T>;
    auto* storage = Allocate<StorageType>();
    range->value_stash.large = storage + offsetof(StorageType, value);
  } else {
    static_assert(value_stash_required);
    static_assert(!IsStoredInline<Iter>());
    static_assert(!IsStoredInline<Transformer>());
    using StorageType = LargeStorage<T, Iter, Transformer>;
    auto* storage = Allocate<StorageType>();
    range->begin.large = storage + offsetof(StorageType, begin);
    range->end.large = storage + offsetof(StorageType, end);
    range->transformer.large = storage + offsetof(StorageType, transformer);
    range->value_stash.large = storage + offsetof(StorageType, value);
  }
}
// Releases the out-of-line storage allocated by CreateRangeStorage. Branch
// structure and StorageType selection must match CreateRangeStorage so that
// sized deallocation sees the size that was actually allocated.
template <typename T, typename Iter, typename Transformer>
void DestroyRangeStorage(RangeStorage* range) {
  constexpr bool value_stash_required =
      IsValueStashRequired<T, Iter, Transformer>() && !IsStoredInline<T>();
  if constexpr (!value_stash_required && IsStoredInline<Iter>() &&
                IsStoredInline<Transformer>()) {
    // Nothing was allocated.
  } else if constexpr (!value_stash_required && !IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    using StorageType = LargeIteratorStorage<Iter>;
    auto* storage =
        static_cast<char*>(range->begin.large) - offsetof(StorageType, begin);
    Deallocate<StorageType>(storage);
  } else if constexpr (!value_stash_required && IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeTransformerStorage<Transformer>;
    auto* storage = static_cast<char*>(range->transformer.large) -
                    offsetof(StorageType, transformer);
    Deallocate<StorageType>(storage);
  } else if constexpr (!value_stash_required && !IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeIteratorTransformerStorage<Iter, Transformer>;
    auto* storage =
        static_cast<char*>(range->begin.large) - offsetof(StorageType, begin);
    Deallocate<StorageType>(storage);
  } else if constexpr (value_stash_required && IsStoredInline<Iter>() &&
                       !IsStoredInline<Transformer>()) {
    using StorageType = LargeTransformerValueStorage<Transformer, T>;
    auto* storage = static_cast<char*>(range->transformer.large) -
                    offsetof(StorageType, transformer);
    Deallocate<StorageType>(storage);
  } else if constexpr (value_stash_required && !IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    // Fixed: must use the same StorageType as CreateRangeStorage
    // (LargeIteratorValueStorage<Iter, T>, previously <Transformer, T>).
    using StorageType = LargeIteratorValueStorage<Iter, T>;
    auto* storage =
        static_cast<char*>(range->begin.large) - offsetof(StorageType, begin);
    Deallocate<StorageType>(storage);
  } else if constexpr (value_stash_required && IsStoredInline<Iter>() &&
                       IsStoredInline<Transformer>()) {
    using StorageType = LargeValueStorage<T>;
    auto* storage = static_cast<char*>(range->value_stash.large) -
                    offsetof(StorageType, value);
    Deallocate<StorageType>(storage);
  } else {
    static_assert(value_stash_required);
    static_assert(!IsStoredInline<Iter>());
    static_assert(!IsStoredInline<Transformer>());
    using StorageType = LargeStorage<T, Iter, Transformer>;
    auto* storage =
        static_cast<char*>(range->begin.large) - offsetof(StorageType, begin);
    Deallocate<StorageType>(storage);
  }
}
// Operations a RangeManagerFn must implement.
enum class Operation {
  kCreate,
  kAdvanceOne,
  kCopy,
  kMove,
  kDestroy,
};
// Argument payload for RangeManagerFn; the active member is implied by the
// Operation value passed alongside it.
union OperationInput {
  struct {
    RangeStorage* storage;
    void* begin;        // Iter*, consumed by move
    void* end;          // Iter*, consumed by move
    void* transformer;  // Transformer*, consumed by move
  } create;
  RangeStorage* advance_one;
  struct {
    const RangeStorage* src;
    RangeStorage* dest;
  } copy;
  struct {
    RangeStorage* src;
    RangeStorage* dest;
  } move;
  RangeStorage* destroy;
};
// Result of a RangeManagerFn call: the current element as const void*, or
// nullptr when the operation yields no element.
union OperationOutput {
  const void* value;
};
// Type-erased entry point implementing all Operations for one concrete
// (T, Iter, Transformer) combination; see RangeManager below.
using RangeManagerFn = OperationOutput (*)(Operation, const OperationInput&);
// Destroys the stashed value (if one is used), the transformer, and both
// iterators in reverse construction order, then frees any out-of-line
// storage.
template <typename T, typename Iter, typename Transformer>
void RangeManagerDestroy(RangeStorage* range) {
  if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
    StorageCast<T>(range->value_stash)->~T();
  }
  StorageCast<Transformer>(range->transformer)->~Transformer();
  StorageCast<Iter>(range->end)->~Iter();
  StorageCast<Iter>(range->begin)->~Iter();
  DestroyRangeStorage<T, Iter, Transformer>(range);
}
// Advances the erased iterator one step. Returns a pointer to the new
// current value, or nullptr when the range is exhausted — in which case all
// range state has already been destroyed.
template <typename T, typename Iter, typename Transformer>
const void* RangeManagerAdvanceOne(RangeStorage* range) {
  auto* begin = StorageCast<Iter>(range->begin);
  auto* end = StorageCast<Iter>(range->end);
  if (++(*begin) == *end) {
    // End reached: eagerly tear down iterators, transformer and stash.
    RangeManagerDestroy<T, Iter, Transformer>(range);
    return nullptr;
  } else {
    auto* transformer = StorageCast<Transformer>(range->transformer);
    if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
      // Replace the stashed value in place with the next transformed value.
      auto* value_stash = StorageCast<T>(range->value_stash);
      value_stash->~T();
      ::new (static_cast<void*>(value_stash)) T((*transformer)(**begin));
      return value_stash;
    } else {
      return static_cast<const T*>(std::addressof((*transformer)(**begin)));
    }
  }
}
// Initializes `range` from begin/end/transformer (taken by value, moved into
// place) and returns a pointer to the first element: the stashed transformed
// value when a stash is required, otherwise the address of the transformed
// reference. Callers guarantee a non-empty range (see Iterator's range
// constructor).
template <typename T, typename Iter, typename Transformer>
const void* RangeManagerCreate(RangeStorage* range, Iter begin, Iter end,
                               Transformer transformer) {
  CreateRangeStorage<T, Iter, Transformer>(range);
  ::new (static_cast<void*>(StorageCast<Iter>(range->begin)))
      Iter(std::move(begin));
  ::new (static_cast<void*>(StorageCast<Iter>(range->end)))
      Iter(std::move(end));
  auto* transformer_ptr =
      ::new (static_cast<void*>(StorageCast<Transformer>(range->transformer)))
          Transformer(std::move(transformer));
  if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
    auto* value_stash = StorageCast<T>(range->value_stash);
    ::new (static_cast<void*>(value_stash))
        T((*transformer_ptr)(**StorageCast<Iter>(range->begin)));
    return value_stash;
  } else {
    return static_cast<const T*>(
        std::addressof((*transformer_ptr)(**StorageCast<Iter>(range->begin))));
  }
}
// Deep-copies src's iterators and transformer into dest. The current value
// is recomputed by applying the copied transformer to the copied iterator
// (not copied from src's stash). Returns a pointer to dest's current value.
template <typename T, typename Iter, typename Transformer>
const void* RangeManagerCopy(const RangeStorage* src, RangeStorage* dest) {
  CreateRangeStorage<T, Iter, Transformer>(dest);
  ::new (static_cast<void*>(StorageCast<Iter>(dest->begin)))
      Iter(*StorageCast<Iter>(src->begin));
  ::new (static_cast<void*>(StorageCast<Iter>(dest->end)))
      Iter(*StorageCast<Iter>(src->end));
  auto* transformer_ptr =
      ::new (static_cast<void*>(StorageCast<Transformer>(dest->transformer)))
          Transformer(*StorageCast<Transformer>(src->transformer));
  if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
    auto* value_stash = StorageCast<T>(dest->value_stash);
    ::new (static_cast<void*>(value_stash))
        T((*transformer_ptr)(**StorageCast<Iter>(dest->begin)));
    return value_stash;
  } else {
    return static_cast<const T*>(
        std::addressof((*transformer_ptr)(**StorageCast<Iter>(dest->begin))));
  }
}
// Moves the erased range state from src to dest: inline slots are
// move-constructed into dest and the moved-from objects destroyed;
// heap-backed slots simply transfer ownership of the pointer. Returns a
// pointer to dest's current value.
template <typename T, typename Iter, typename Transformer>
const void* RangeManagerMove(RangeStorage* src, RangeStorage* dest) {
  if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
    if constexpr (IsStoredInline<T>()) {
      ::new (static_cast<void*>(&dest->value_stash.small[0]))
          T(std::move(*StorageCast<T>(src->value_stash)));
      StorageCast<T>(src->value_stash)->~T();
    } else {
      // Out-of-line stash: steal the pointer, no construction needed.
      dest->value_stash.large = src->value_stash.large;
    }
  }
  if constexpr (IsStoredInline<Transformer>()) {
    ::new (static_cast<void*>(&dest->transformer.small[0]))
        Transformer(std::move(*StorageCast<Transformer>(src->transformer)));
    StorageCast<Transformer>(src->transformer)->~Transformer();
  } else {
    dest->transformer.large = src->transformer.large;
  }
  if constexpr (IsStoredInline<Iter>()) {
    ::new (static_cast<void*>(&dest->begin.small[0]))
        Iter(std::move(*StorageCast<Iter>(src->begin)));
    ::new (static_cast<void*>(&dest->end.small[0]))
        Iter(std::move(*StorageCast<Iter>(src->end)));
    StorageCast<Iter>(src->end)->~Iter();
    StorageCast<Iter>(src->begin)->~Iter();
  } else {
    dest->begin.large = src->begin.large;
    dest->end.large = src->end.large;
  }
  if constexpr (IsValueStashRequired<T, Iter, Transformer>()) {
    return StorageCast<T>(dest->value_stash);
  } else {
    return static_cast<const T*>(
        std::addressof(**StorageCast<Iter>(dest->begin)));
  }
}
// Single type-erased dispatch point: routes each Operation to the concrete
// RangeManager* implementation for (T, Iter, Transformer).
template <typename T, typename Iter, typename Transformer>
OperationOutput RangeManager(Operation op, const OperationInput& input) {
  OperationOutput output;
  switch (op) {
    case Operation::kCreate:
      output.value = RangeManagerCreate<T, Iter, Transformer>(
          input.create.storage,
          std::move(*static_cast<Iter*>(input.create.begin)),
          std::move(*static_cast<Iter*>(input.create.end)),
          std::move(*static_cast<Transformer*>(input.create.transformer)));
      break;
    case Operation::kAdvanceOne:
      output.value =
          RangeManagerAdvanceOne<T, Iter, Transformer>(input.advance_one);
      break;
    case Operation::kCopy:
      output.value = RangeManagerCopy<T, Iter, Transformer>(input.copy.src,
                                                            input.copy.dest);
      break;
    case Operation::kMove:
      output.value = RangeManagerMove<T, Iter, Transformer>(input.move.src,
                                                            input.move.dest);
      break;
    case Operation::kDestroy:
      RangeManagerDestroy<T, Iter, Transformer>(input.destroy);
      output.value = nullptr;
      break;
  }
  return output;
}
template <typename T>
class Iterator final {
public:
  // Single-pass input iterator over the type-erased range.
  using iterator_category = std::input_iterator_tag;
  using value_type = T;
  using pointer = const value_type*;
  using reference = const value_type&;
  using difference_type = ptrdiff_t;
  // A default-constructed Iterator is the end iterator (no range attached).
  Iterator() = default;
  // Type-erases [first, last) viewed through `transformer`. An empty range
  // yields an end iterator; otherwise the erased state is created in range_
  // and value_ points at the first transformed element.
  template <typename Iter, typename Transformer>
  Iterator(Iter first, Iter last, Transformer transformer) {
    if (first != last) {
      manager_ = &RangeManager<T, Iter, Transformer>;
      value_ = static_cast<pointer>(
          ((*manager_)(Operation::kCreate,
                       OperationInput{.create = {.storage = &range_,
                                                 .begin = std::addressof(first),
                                                 .end = std::addressof(last),
                                                 .transformer = std::addressof(
                                                     transformer)}}))
              .value);
    }
  }
Iterator(const Iterator& other) { Copy(other); }
Iterator(Iterator&& other) noexcept { Move(other); }
~Iterator() { Destroy(); }
Iterator& operator=(const Iterator& other) {
if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
Destroy();
Copy(other);
}
return *this;
}
Iterator& operator=(Iterator&& other) noexcept {
if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
Destroy();
Move(other);
}
return *this;
}
reference operator*() const {
ABSL_DCHECK(value_ != nullptr) << "SizedInputIterator is at end";
return *value_;
}
pointer operator->() const {
ABSL_DCHECK(value_ != nullptr) << "SizedInputIterator is at end";
return value_;
}
Iterator& operator++() {
ABSL_DCHECK(value_ != nullptr) << "SizedInputIterator is at end";
value_ = static_cast<pointer>(
((*manager_)(Operation::kAdvanceOne,
OperationInput{.advance_one = &range_}))
.value);
if (value_ == nullptr) {
manager_ = nullptr;
}
return *this;
}
friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
ABSL_DCHECK(lhs.manager_ == rhs.manager_ || lhs.manager_ == nullptr ||
rhs.manager_ == nullptr);
ABSL_DCHECK(lhs.value_ == nullptr || rhs.value_ == nullptr ||
lhs.value_ == rhs.value_);
return lhs.value_ == rhs.value_;
}
private:
void Destroy() noexcept {
if (manager_ != nullptr) {
(*manager_)(Operation::kDestroy, OperationInput{.destroy = &range_});
}
}
void Copy(const Iterator& other) {
manager_ = other.manager_;
if (manager_ != nullptr) {
value_ = static_cast<pointer>(
((*manager_)(
Operation::kCopy,
OperationInput{.copy = {.src = &other.range_, .dest = &range_}}))
.value);
} else {
value_ = nullptr;
}
}
void Move(Iterator& other) noexcept {
manager_ = other.manager_;
other.manager_ = nullptr;
if (manager_ != nullptr) {
value_ = static_cast<pointer>(
((*manager_)(
Operation::kMove,
OperationInput{.move = {.src = &other.range_, .dest = &range_}}))
.value);
} else {
value_ = nullptr;
}
}
pointer value_ = nullptr;
RangeManagerFn manager_ = nullptr;
RangeStorage range_;
};
// Inequality in terms of the friend operator== above (C++17 has no rewritten
// comparison operators, so this must be spelled out).
template <typename T>
inline bool operator!=(const Iterator<T>& lhs, const Iterator<T>& rhs) {
  return !(lhs == rhs);
}
}
// A non-owning, single-pass view of `size()` elements convertible to T.
// Construction from any sized container (or initializer_list) is allowed
// when its elements can be iterated as T, optionally through a caller
// supplied transformer. The view borrows the container: it must not outlive
// it (see ABSL_ATTRIBUTE_LIFETIME_BOUND on the constructors).
template <typename T>
class SizedInputView final {
 public:
  using iterator = sized_input_view_internal::Iterator<T>;
  using const_iterator = iterator;
  using value_type = T;
  using reference = const value_type&;
  using const_reference = reference;
  using pointer = const value_type*;
  using const_pointer = pointer;
  using size_type = size_t;
  SizedInputView() = default;
  SizedInputView(const SizedInputView&) = default;
  SizedInputView& operator=(const SizedInputView&) = default;
  // Move leaves the source empty (size 0) so it remains a valid view.
  SizedInputView(SizedInputView&& other) noexcept
      : begin_(std::move(other.begin_)), size_(other.size_) {
    other.size_ = 0;
  }
  SizedInputView& operator=(SizedInputView&& other) noexcept {
    if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
      begin_ = std::move(other.begin_);
      size_ = other.size_;
      other.size_ = 0;
    }
    return *this;
  }
  // Implicit construction from any sized container whose elements can be
  // iterated as T without an explicit transformer. The !is_same guard keeps
  // this from hijacking the copy constructor.
  template <typename C,
            typename IterType = sized_input_view_internal::ConstIterator<C>,
            typename SizeType = sized_input_view_internal::SizeType<C>,
            typename = std::enable_if_t<
                (sized_input_view_internal::CanIterateAsType<
                     sized_input_view_internal::ForwardingTransformer, T,
                     IterType>() &&
                 std::is_convertible_v<SizeType, size_type> &&
                 !std::is_same_v<SizedInputView<T>, C>)>>
  SizedInputView(const C& c ABSL_ATTRIBUTE_LIFETIME_BOUND)
      : SizedInputView(c, sized_input_view_internal::ForwardingTransformer{}) {}
  // As above, but with a user-provided element transformer.
  template <
      typename C, typename Transformer,
      typename IterType = sized_input_view_internal::ConstIterator<C>,
      typename SizeType = sized_input_view_internal::SizeType<C>,
      typename = std::enable_if_t<(sized_input_view_internal::CanIterateAsType<
                                       Transformer, T, IterType>() &&
                                   std::is_convertible_v<SizeType, size_type> &&
                                   !std::is_same_v<SizedInputView<T>, C>)>>
  SizedInputView(const C& c ABSL_ATTRIBUTE_LIFETIME_BOUND,
                 Transformer&& transformer)
      : begin_(std::begin(c), std::end(c),
               std::forward<Transformer>(transformer)),
        size_(std::size(c)) {}
  // initializer_list overloads: needed because braced lists do not deduce
  // through the generic container constructor above.
  template <
      typename U,
      typename = std::enable_if_t<sized_input_view_internal::CanIterateAsType<
          sized_input_view_internal::ForwardingTransformer, T,
          typename std::initializer_list<U>::const_iterator>()>>
  SizedInputView(
      const std::initializer_list<U>& c ABSL_ATTRIBUTE_LIFETIME_BOUND)
      : SizedInputView(c, sized_input_view_internal::ForwardingTransformer{}) {}
  template <
      typename U, typename Transformer,
      typename = std::enable_if_t<sized_input_view_internal::CanIterateAsType<
          Transformer, T, typename std::initializer_list<U>::const_iterator>()>>
  SizedInputView(const std::initializer_list<U>& c
                     ABSL_ATTRIBUTE_LIFETIME_BOUND,
                 Transformer&& transformer)
      : begin_(c.begin(), c.end(), std::forward<Transformer>(transformer)),
        size_(c.size()) {}
  // begin() returns a reference to the stored iterator (input iterators are
  // single-pass; copying one is deliberate and expensive).
  const iterator& begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return begin_; }
  iterator end() const { return iterator(); }
  bool empty() const { return size() == 0; }
  size_type size() const { return size_; }
 private:
  iterator begin_;
  size_type size_ = 0;
};
}
#endif | #include "common/sized_input_view.h"
#include <array>
#include <iterator>
#include <list>
#include <numeric>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::_;
using testing::ElementsAre;
// An empty view must not yield any elements.
TEST(SizedInputViewTest, EmptyVector) {
  std::vector<int> a;
  SizedInputView<int> v(a);
  for (const int& i : v) {
    FAIL() << "Empty range should not iterate " << i;
  }
}
// With no conversion needed, the view yields references to the original
// elements (same addresses), not copies.
TEST(SizedInputViewTest, Vector) {
  std::vector<int> a = {1, 2, 3};
  SizedInputView<int> v(a);
  auto expected_it = a.begin();
  for (const int& i : v) {
    EXPECT_EQ(i, *expected_it);
    EXPECT_EQ(&i, &*expected_it);
    ++expected_it;
  }
  EXPECT_EQ(expected_it, a.end());
}
// Comparing two distinct non-end iterators is a documented precondition
// violation; it should trip a debug assertion.
TEST(SizedInputViewTest, NonEndComparison) {
  std::vector<int> a = {1, 2, 3};
  SizedInputView<int> v(a);
  auto it = v.begin();
  ++it;
  EXPECT_DEBUG_DEATH({ (void)(v.begin() == it); }, _);
}
// Incrementing past end should trip a debug assertion.
TEST(SizedInputViewTest, IteratePastEnd) {
  std::vector<int> a = {1};
  SizedInputView<int> v(a);
  auto it = v.begin();
  ++it;
  EXPECT_DEBUG_DEATH({ ++it; }, _);
}
// Self-assignment must leave the view intact.
TEST(SizedInputViewTest, SelfAssignment) {
  std::vector<int> a = {1, 2, 3};
  SizedInputView<int> v(a);
  v = v;
  EXPECT_THAT(v, ElementsAre(1, 2, 3));
}
// Node-based (non-contiguous) containers are supported identically.
TEST(SizedInputViewTest, List) {
  std::list<int> a = {1, 2, 3};
  SizedInputView<int> v(a);
  auto expected_it = a.begin();
  for (const int& i : v) {
    EXPECT_EQ(i, *expected_it);
    EXPECT_EQ(&i, &*expected_it);
    ++expected_it;
  }
  EXPECT_EQ(expected_it, a.end());
}
// Built-in arrays work through std::begin/std::end.
TEST(SizedInputViewTest, Array) {
  int a[] = {1, 2, 3};
  SizedInputView<int> v(a);
  auto expected_it = std::begin(a);
  for (const int& i : v) {
    EXPECT_EQ(i, *expected_it);
    EXPECT_EQ(&i, &*expected_it);
    ++expected_it;
  }
  EXPECT_EQ(expected_it, std::end(a));
}
// A container converts implicitly at a call site taking the view.
TEST(SizedInputViewTest, AsFunctionArgument) {
  std::list<int> a = {1, 2, 3};
  auto f = [](const SizedInputView<int>& v) {
    EXPECT_THAT(v, ElementsAre(1, 2, 3));
  };
  f(a);
}
// Braced initializer lists also convert implicitly at a call site.
TEST(SizedInputViewTest, InitializerListArgument) {
  auto f = [](const SizedInputView<std::string>& v) {
    EXPECT_THAT(v, ElementsAre("a", "b"));
  };
  f({"a", "b"});
}
// Element-type conversion (string -> string_view) with an empty source.
TEST(SizedInputViewTest, EmptyConversion) {
  std::vector<std::string> a;
  SizedInputView<absl::string_view> v(a);
  EXPECT_EQ(v.begin(), v.end());
}
// string_view elements must alias the original strings' buffers (no copy).
TEST(SizedInputViewTest, Conversion) {
  std::vector<std::string> a = {"a", "b", "c"};
  SizedInputView<absl::string_view> v(a);
  auto v_it = v.begin();
  for (const std::string& s : a) {
    EXPECT_EQ(v_it->data(), s.data());
    ++v_it;
  }
}
// Views of views: each outer element is itself a converting view; copies of
// the outer view remain usable after the source view is cleared.
TEST(SizedInputViewTest, NestedConversion) {
  std::vector<std::list<std::string>> a = {{"a", "b"}, {"c", "d"}};
  SizedInputView<SizedInputView<absl::string_view>> v(a);
  auto v_it = v.begin();
  EXPECT_THAT(*v_it, ElementsAre("a", "b"));
  ++v_it;
  EXPECT_THAT(*v_it, ElementsAre("c", "d"));
  ++v_it;
  EXPECT_EQ(v_it, v.end());
  SizedInputView<SizedInputView<absl::string_view>> c(v);
  v = {};
  auto c_it = c.begin();
  EXPECT_THAT(*c_it, ElementsAre("a", "b"));
  ++c_it;
  EXPECT_THAT(*c_it, ElementsAre("c", "d"));
  ++c_it;
  EXPECT_EQ(c_it, c.end());
}
// Converted values too large for inline storage exercise the heap-stash
// path; the implicit (non-explicit) int constructor drives the conversion.
TEST(SizedInputViewTest, LargeValueConversion) {
  struct ConvertibleFromInt {
    ConvertibleFromInt(int i)
        : value(absl::StrCat(i)) {}
    std::string value;
    int padding[30];
    std::vector<int> vec = {1, 2, 3};
  };
  std::vector<int> a(100);
  std::iota(a.begin(), a.end(), 0);
  SizedInputView<ConvertibleFromInt> v(a);
  auto a_it = a.begin();
  for (const auto& i : v) {
    EXPECT_EQ(i.value, absl::StrCat(*a_it));
    ++a_it;
  }
  EXPECT_EQ(a_it, a.end());
  SizedInputView<ConvertibleFromInt> v2(a);
  auto it = v2.begin();
  ++it;
  SizedInputView<ConvertibleFromInt> c(v);
  v = {};
  a_it = a.begin();
  for (const auto& i : c) {
    EXPECT_EQ(i.value, absl::StrCat(*a_it));
    ++a_it;
  }
  EXPECT_EQ(a_it, a.end());
}
// The iterators satisfy enough of the input-iterator contract to feed STL
// member templates such as vector::insert.
TEST(SizedInputViewTest, StlInterop) {
  std::vector<int> a = {1, 2, 3};
  SizedInputView<int> v(a);
  std::vector<int> b = {5, 6, 7};
  b.insert(b.end(), v.begin(), v.end());
}
}
} | SizedInputView(SizedInputView&& other) noexcept
: begin_(std::move(other.begin_)), size_(other.size_) {
other.size_ = 0;
} | TEST(SizedInputViewTest, LargeValueConversion) {
struct ConvertibleFromInt {
ConvertibleFromInt(int i)
: value(absl::StrCat(i)) {}
std::string value;
int padding[30];
std::vector<int> vec = {1, 2, 3};
};
std::vector<int> a(100);
std::iota(a.begin(), a.end(), 0);
SizedInputView<ConvertibleFromInt> v(a);
auto a_it = a.begin();
for (const auto& i : v) {
EXPECT_EQ(i.value, absl::StrCat(*a_it));
++a_it;
}
EXPECT_EQ(a_it, a.end());
SizedInputView<ConvertibleFromInt> v2(a);
auto it = v2.begin();
++it;
SizedInputView<ConvertibleFromInt> c(v);
v = {};
a_it = a.begin();
for (const auto& i : c) {
EXPECT_EQ(i.value, absl::StrCat(*a_it));
++a_it;
}
EXPECT_EQ(a_it, a.end());
} |
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace floor_div {
namespace {
// Tensor slot indices for the FLOOR_DIV op (numerator, denominator, result).
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
// Per-node state: computed once in Prepare(), consumed in Eval().
struct OpData {
  bool requires_broadcast;  // true when input shapes differ
};
// Allocates per-node state. `buffer`/`length` (serialized op data) are
// unused by this op. Ownership passes to the runtime, which releases the
// allocation via Free().
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  OpData* op_data = new OpData;
  op_data->requires_broadcast = false;
  return op_data;
}
// Releases the OpData allocated by Init(). A static_cast is the correct
// (and clang-tidy preferred) way to convert a void* back to the pointer
// type it was allocated as; reinterpret_cast is unnecessary here.
void Free(TfLiteContext* context, void* buffer) {
  delete static_cast<OpData*>(buffer);
}
// Validates inputs and sizes the output tensor.
// - Both inputs must share one of the supported types
//   (float32/int32/int16/int8); the output takes the same type.
// - Records whether broadcasting is needed so Eval() can pick the right
//   kernel, and resizes the output to the (possibly broadcast) shape.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  const TfLiteType type = input1->type;
  switch (type) {
    case kTfLiteFloat32:
    case kTfLiteInt32:
    case kTfLiteInt16:
    case kTfLiteInt8:
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
                         TfLiteTypeGetName(type));
      return kTfLiteError;
  }
  output->type = type;
  data->requires_broadcast = !HaveSameShapes(input1, input2);
  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    // Same shapes: output shape is a copy of input1's shape.
    output_size = TfLiteIntArrayCopy(input1->dims);
  }
  // ResizeTensor takes ownership of output_size.
  return context->ResizeTensor(context, output, output_size);
}
// Typed evaluation: rejects any zero in the denominator tensor (for all
// supported types, floats included), then applies FloorDiv element-wise,
// using the 4D slow broadcast path when shapes differ.
template <typename T>
TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast,
                      const TfLiteTensor* input1, const TfLiteTensor* input2,
                      TfLiteTensor* output) {
  const T* denominator_data = GetTensorData<T>(input2);
  // Scan the whole denominator up front so we fail before writing any
  // partial output.
  for (int i = 0; i < NumElements(input2); ++i) {
    if (std::equal_to<T>()(denominator_data[i], 0)) {
      TF_LITE_KERNEL_LOG(context, "Division by 0");
      return kTfLiteError;
    }
  }
  if (requires_broadcast) {
    reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), denominator_data, GetTensorShape(output),
        GetTensorData<T>(output), reference_ops::FloorDiv<T>);
  } else {
    reference_ops::BinaryFunction<T, T, T>(
        GetTensorShape(input1), GetTensorData<T>(input1),
        GetTensorShape(input2), GetTensorData<T>(input2),
        GetTensorShape(output), GetTensorData<T>(output),
        reference_ops::FloorDiv<T>);
  }
  return kTfLiteOk;
}
// Dispatches to the typed EvalImpl based on the (already validated) input
// type. The default branch guards against types Prepare() should have
// rejected.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  switch (input1->type) {
    case kTfLiteInt8: {
      return EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2,
                              output);
    }
    case kTfLiteInt16: {
      return EvalImpl<int16_t>(context, data->requires_broadcast, input1,
                               input2, output);
    }
    case kTfLiteInt32: {
      return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
                               input2, output);
    }
    case kTfLiteFloat32: {
      return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
                             output);
    }
    default: {
      TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
                         TfLiteTypeGetName(input1->type));
      return kTfLiteError;
    }
  }
}
}
}
// Registration entry point: exposes the kernel's lifecycle callbacks
// (init/free/prepare/invoke) to the TFLite interpreter. The registration is
// a function-local static so a single instance is shared by all nodes.
TfLiteRegistration* Register_FLOOR_DIV() {
  static TfLiteRegistration r = {floor_div::Init, floor_div::Free,
                                 floor_div::Prepare, floor_div::Eval};
  return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
// Test harness: builds a single-op FLOOR_DIV model with two inputs and one
// output of element type T, and exposes helpers to populate inputs and read
// the result.
template <typename T>
class FloorDivModel : public SingleOpModel {
 public:
  FloorDivModel(const TensorData& input1, const TensorData& input2,
                const TensorData& output) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_FLOOR_DIV, BuiltinOptions_FloorDivOptions,
                 CreateFloorDivOptions(builder_).Union());
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }
  int input1() { return input1_; }
  int input2() { return input2_; }
  std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 private:
  int input1_;  // numerator tensor index
  int input2_;  // denominator tensor index
  int output_;  // result tensor index
};
// Element-wise int32 floor division, matching shapes.
TEST(FloorDivModel, Simple) {
  FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
                               {TensorType_INT32, {1, 2, 2, 1}},
                               {TensorType_INT32, {}});
  model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
  model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
// Floor semantics with negatives: result rounds toward -infinity.
TEST(FloorDivModel, NegativeValue) {
  FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
                               {TensorType_INT32, {1, 2, 2, 1}},
                               {TensorType_INT32, {}});
  model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
  model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
// Scalar-like denominator broadcast against a 4D numerator.
TEST(FloorDivModel, BroadcastFloorDiv) {
  FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
                               {TensorType_INT32, {1}}, {TensorType_INT32, {}});
  model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
  model.PopulateTensor<int32_t>(model.input2(), {-3});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
// Float path: result is floor(a / b) as a float.
TEST(FloorDivModel, SimpleFloat) {
  FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
                             {TensorType_FLOAT32, {1, 2, 2, 1}},
                             {TensorType_FLOAT32, {}});
  model.PopulateTensor<float>(model.input1(), {10.05, 9.09, 11.9, 3.01});
  model.PopulateTensor<float>(model.input2(), {2.05, 2.03, 3.03, 4.03});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(4.0, 4.0, 3.0, 0.0));
}
// Float path with negatives: rounds toward -infinity.
TEST(FloorDivModel, NegativeValueFloat) {
  FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
                             {TensorType_FLOAT32, {1, 2, 2, 1}},
                             {TensorType_FLOAT32, {}});
  model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
  model.PopulateTensor<float>(model.input2(), {2.0, 2.3, -3.0, -4.1});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(5.0, -5.0, 3.0, -2.0));
}
// Float broadcast of a single-element denominator.
TEST(FloorDivModel, BroadcastFloorDivFloat) {
  FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
                             {TensorType_FLOAT32, {1}},
                             {TensorType_FLOAT32, {}});
  model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
  model.PopulateTensor<float>(model.input2(), {-3.3});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(-4.0, 2.0, 3.0, -3.0));
}
// int16 mirrors of the int32 cases above.
TEST(FloorDivModel, SimpleInt16) {
  FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
                               {TensorType_INT16, {1, 2, 2, 1}},
                               {TensorType_INT16, {}});
  model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
  model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValueInt16) {
  FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
                               {TensorType_INT16, {1, 2, 2, 1}},
                               {TensorType_INT16, {}});
  model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
  model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDivInt16) {
  FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
                               {TensorType_INT16, {1}}, {TensorType_INT16, {}});
  model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
  model.PopulateTensor<int16_t>(model.input2(), {-3});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
  EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
}
} | TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
const TfLiteType type = input1->type;
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
case kTfLiteInt16:
case kTfLiteInt8:
break;
default:
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.",
TfLiteTypeGetName(type));
return kTfLiteError;
}
output->type = type;
data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
} | TEST(FloorDivModel, Simple) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValue) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDiv) {
FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int32_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
}
TEST(FloorDivModel, SimpleFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.05, 9.09, 11.9, 3.01});
model.PopulateTensor<float>(model.input2(), {2.05, 2.03, 3.03, 4.03});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(4.0, 4.0, 3.0, 0.0));
}
TEST(FloorDivModel, NegativeValueFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {2.0, 2.3, -3.0, -4.1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5.0, -5.0, 3.0, -2.0));
}
TEST(FloorDivModel, BroadcastFloorDivFloat) {
FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0});
model.PopulateTensor<float>(model.input2(), {-3.3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4.0, 2.0, 3.0, -3.0));
}
TEST(FloorDivModel, SimpleInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0));
}
TEST(FloorDivModel, NegativeValueInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2));
}
TEST(FloorDivModel, BroadcastFloorDivInt16) {
FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}},
{TensorType_INT16, {1}}, {TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7});
model.PopulateTensor<int16_t>(model.input2(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3));
} |
#ifndef ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_
#define ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/pool_urbg.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// A seed-sequence-like type that draws seed material from the process-wide
// RandenPool instead of caller-provided entropy. Only generate() does real
// work; size()/param() exist to satisfy the seed-sequence interface shape.
class RandenPoolSeedSeq {
 private:
  struct ContiguousTag {};
  struct BufferTag {};
  // Fast path for contiguous iterators: fill the destination's bytes
  // directly from the pool.
  template <typename Contiguous>
  void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
    const size_t n = static_cast<size_t>(std::distance(begin, end));
    auto* a = &(*begin);
    RandenPool<uint8_t>::Fill(
        absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
  }
  // General path: fill a temporary uint32_t buffer, then copy element-wise
  // through the (possibly non-contiguous) output iterator.
  template <typename RandomAccessIterator>
  void generate_impl(BufferTag, RandomAccessIterator begin,
                     RandomAccessIterator end) {
    const size_t n = std::distance(begin, end);
    absl::InlinedVector<uint32_t, 8> data(n, 0);
    RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
    std::copy(std::begin(data), std::end(data), begin);
  }
 public:
  using result_type = uint32_t;
  // This sequence carries no state to report.
  size_t size() { return 0; }
  template <typename OutIterator>
  void param(OutIterator) const {}
  // Fills [begin, end) with pool-generated values. Pointers and
  // std::vector iterators take the contiguous fast path; everything else
  // goes through the buffered path.
  template <typename RandomAccessIterator>
  void generate(RandomAccessIterator begin, RandomAccessIterator end) {
    if (begin != end) {
      using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
      using TagType = absl::conditional_t<
          (std::is_pointer<RandomAccessIterator>::value ||
           std::is_same<RandomAccessIterator,
                        typename std::vector<U>::iterator>::value),
          ContiguousTag, BufferTag>;
      generate_impl(TagType{}, begin, end);
    }
  }
};
// Wraps a standard URBG so that default construction seeds it from `Seeder`
// (by default the Randen pool) and explicit seed sequences are salted
// before use. Copying is disabled so two generators never share a stream by
// accident; moving is allowed.
template <typename URBG, typename Seeder = RandenPoolSeedSeq>
class NonsecureURBGBase {
 public:
  using result_type = typename URBG::result_type;
  // Default construction seeds from Seeder rather than a fixed default seed.
  NonsecureURBGBase() : urbg_(ConstructURBG()) {}
  NonsecureURBGBase(const NonsecureURBGBase&) = delete;
  NonsecureURBGBase& operator=(const NonsecureURBGBase&) = delete;
  NonsecureURBGBase(NonsecureURBGBase&&) = default;
  NonsecureURBGBase& operator=(NonsecureURBGBase&&) = default;
  // Seed-sequence constructor; the enable_if keeps it from shadowing the
  // move constructor.
  template <class SSeq, typename = typename absl::enable_if_t<
                            !std::is_same<SSeq, NonsecureURBGBase>::value>>
  explicit NonsecureURBGBase(SSeq&& seq)
      : urbg_(ConstructURBG(std::forward<SSeq>(seq))) {}
  // Parenthesized to survive any min/max macros (e.g. from <windows.h>).
  static constexpr result_type(min)() { return (URBG::min)(); }
  static constexpr result_type(max)() { return (URBG::max)(); }
  result_type operator()() { return urbg_(); }
  void discard(unsigned long long values) {
    urbg_.discard(values);
  }
  bool operator==(const NonsecureURBGBase& other) const {
    return urbg_ == other.urbg_;
  }
  bool operator!=(const NonsecureURBGBase& other) const {
    return !(urbg_ == other.urbg_);
  }
 private:
  static URBG ConstructURBG() {
    Seeder seeder;
    return URBG(seeder);
  }
  // User-provided sequences are salted so equal seeds still yield
  // process-distinct (but within-process reproducible) streams.
  template <typename SSeq>
  static URBG ConstructURBG(SSeq&& seq) {
    auto salted_seq =
        random_internal::MakeSaltedSeedSeq(std::forward<SSeq>(seq));
    return URBG(salted_seq);
  }
  URBG urbg_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/nonsecure_base.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
namespace {
using ExampleNonsecureURBG =
absl::random_internal::NonsecureURBGBase<std::mt19937>;
// Consumes a value to silence unused-variable warnings in the
// interface-conformance test below.
template <typename T>
void Use(const T&) {}
}
// Default construction must succeed (auto-seeding from the pool).
TEST(NonsecureURBGBase, DefaultConstructorIsValid) {
  ExampleNonsecureURBG urbg;
}
// The recommended public aliases are constructible.
TEST(RecommendedTemplates, CanBeConstructed) {
  absl::BitGen default_generator;
  absl::InsecureBitGen insecure_generator;
}
// discard() is part of the URBG surface on both aliases.
TEST(RecommendedTemplates, CanDiscardValues) {
  absl::BitGen default_generator;
  absl::InsecureBitGen insecure_generator;
  default_generator.discard(5);
  insecure_generator.discard(5);
}
// Checks the C++ UniformRandomBitGenerator/engine contract: copy is
// disabled, move works, operator() returns result_type, equality compares
// stream state, and discard advances it.
TEST(NonsecureURBGBase, StandardInterface) {
  using E = absl::random_internal::NonsecureURBGBase<std::minstd_rand>;
  using T = typename E::result_type;
  static_assert(!std::is_copy_constructible<E>::value,
                "NonsecureURBGBase should not be copy constructible");
  static_assert(!absl::is_copy_assignable<E>::value,
                "NonsecureURBGBase should not be copy assignable");
  static_assert(std::is_move_constructible<E>::value,
                "NonsecureURBGBase should be move constructible");
  static_assert(absl::is_move_assignable<E>::value,
                "NonsecureURBGBase should be move assignable");
  static_assert(std::is_same<decltype(std::declval<E>()()), T>::value,
                "return type of operator() must be result_type");
  {
    const E x, y;
    Use(x);
    Use(y);
    static_assert(std::is_same<decltype(x == y), bool>::value,
                  "return type of operator== must be bool");
    static_assert(std::is_same<decltype(x != y), bool>::value,
                  "return type of operator== must be bool");
  }
  E e;
  std::seed_seq q{1, 2, 3};
  E{};
  E{q};
  {
    // Moved-to generators keep the source's (salted) stream.
    E tmp(q);
    E m = std::move(tmp);
    E n(std::move(m));
    EXPECT_TRUE(e != n);
  }
  {
    // Equal seeds give equal state; drawing a value diverges them.
    E a(q);
    E b(q);
    EXPECT_TRUE(a != e);
    EXPECT_TRUE(a == b);
    a();
    EXPECT_TRUE(a != b);
  }
  unsigned long long z = 1;
  e.discard(z);
}
// Construction from an explicit seed sequence must succeed.
TEST(NonsecureURBGBase, SeedSeqConstructorIsValid) {
  std::seed_seq seq;
  ExampleNonsecureURBG rbg(seq);
}
// Works as the bit source for absl distribution helpers.
TEST(NonsecureURBGBase, CompatibleWithDistributionUtils) {
  ExampleNonsecureURBG rbg;
  absl::Uniform(rbg, 0, 100);
  absl::Uniform(rbg, 0.5, 0.7);
  absl::Poisson<uint32_t>(rbg);
  absl::Exponential<float>(rbg);
}
// Works as the bit source for standard-library distributions.
TEST(NonsecureURBGBase, CompatibleWithStdDistributions) {
  ExampleNonsecureURBG rbg;
  static_cast<void>(std::uniform_int_distribution<uint32_t>(0, 100)(rbg));
  static_cast<void>(std::uniform_real_distribution<float>()(rbg));
  static_cast<void>(std::bernoulli_distribution(0.2)(rbg));
}
// Two default-seeded generators should produce distinct streams.
TEST(NonsecureURBGBase, ConsecutiveDefaultInstancesYieldUniqueVariates) {
  const size_t kNumSamples = 128;
  ExampleNonsecureURBG rbg1;
  ExampleNonsecureURBG rbg2;
  for (size_t i = 0; i < kNumSamples; i++) {
    EXPECT_NE(rbg1(), rbg2());
  }
}
// Within a process, equal seed sequences reproduce the same stream, and
// discard() keeps the two generators in lockstep.
TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) {
  std::seed_seq seq;
  ExampleNonsecureURBG rbg1(seq);
  ExampleNonsecureURBG rbg2(seq);
  for (uint32_t i = 0; i < 1000; i++) {
    EXPECT_EQ(rbg1(), rbg2());
  }
  rbg1.discard(100);
  rbg2.discard(100);
  for (uint32_t i = 0; i < 1000; i++) {
    EXPECT_EQ(rbg1(), rbg2());
  }
}
// generate() must actually write output for uint32_t destinations.
TEST(RandenPoolSeedSeqTest, SeederWorksForU32) {
  absl::random_internal::RandenPoolSeedSeq seeder;
  uint32_t state[2] = {0, 0};
  seeder.generate(std::begin(state), std::end(state));
  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
// 64-bit destinations must get entropy in both halves of each word.
TEST(RandenPoolSeedSeqTest, SeederWorksForU64) {
  absl::random_internal::RandenPoolSeedSeq seeder;
  uint64_t state[2] = {0, 0};
  seeder.generate(std::begin(state), std::end(state));
  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
  EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0);
}
// Signed destinations are filled as well.
TEST(RandenPoolSeedSeqTest, SeederWorksForS32) {
  absl::random_internal::RandenPoolSeedSeq seeder;
  int32_t state[2] = {0, 0};
  seeder.generate(std::begin(state), std::end(state));
  EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForVector) {
absl::random_internal::RandenPoolSeedSeq seeder;
std::vector<uint32_t> state(2);
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
} | result_type operator()() { return urbg_(); } | TEST(NonsecureURBGBase, StandardInterface) {
using E = absl::random_internal::NonsecureURBGBase<std::minstd_rand>;
using T = typename E::result_type;
static_assert(!std::is_copy_constructible<E>::value,
"NonsecureURBGBase should not be copy constructible");
static_assert(!absl::is_copy_assignable<E>::value,
"NonsecureURBGBase should not be copy assignable");
static_assert(std::is_move_constructible<E>::value,
"NonsecureURBGBase should be move constructible");
static_assert(absl::is_move_assignable<E>::value,
"NonsecureURBGBase should be move assignable");
static_assert(std::is_same<decltype(std::declval<E>()()), T>::value,
"return type of operator() must be result_type");
{
const E x, y;
Use(x);
Use(y);
static_assert(std::is_same<decltype(x == y), bool>::value,
"return type of operator== must be bool");
static_assert(std::is_same<decltype(x != y), bool>::value,
"return type of operator== must be bool");
}
E e;
std::seed_seq q{1, 2, 3};
E{};
E{q};
{
E tmp(q);
E m = std::move(tmp);
E n(std::move(m));
EXPECT_TRUE(e != n);
}
{
E a(q);
E b(q);
EXPECT_TRUE(a != e);
EXPECT_TRUE(a == b);
a();
EXPECT_TRUE(a != b);
}
unsigned long long z = 1;
e.discard(z);
}
TEST(NonsecureURBGBase, CompatibleWithDistributionUtils) {
ExampleNonsecureURBG rbg;
absl::Uniform(rbg, 0, 100);
absl::Uniform(rbg, 0.5, 0.7);
absl::Poisson<uint32_t>(rbg);
absl::Exponential<float>(rbg);
}
TEST(NonsecureURBGBase, CompatibleWithStdDistributions) {
ExampleNonsecureURBG rbg;
static_cast<void>(std::uniform_int_distribution<uint32_t>(0, 100)(rbg));
static_cast<void>(std::uniform_real_distribution<float>()(rbg));
static_cast<void>(std::bernoulli_distribution(0.2)(rbg));
}
TEST(NonsecureURBGBase, ConsecutiveDefaultInstancesYieldUniqueVariates) {
const size_t kNumSamples = 128;
ExampleNonsecureURBG rbg1;
ExampleNonsecureURBG rbg2;
for (size_t i = 0; i < kNumSamples; i++) {
EXPECT_NE(rbg1(), rbg2());
}
}
TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) {
std::seed_seq seq;
ExampleNonsecureURBG rbg1(seq);
ExampleNonsecureURBG rbg2(seq);
for (uint32_t i = 0; i < 1000; i++) {
EXPECT_EQ(rbg1(), rbg2());
}
rbg1.discard(100);
rbg2.discard(100);
for (uint32_t i = 0; i < 1000; i++) {
EXPECT_EQ(rbg1(), rbg2());
}
} |
#include "arolla/util/fingerprint.h"
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/numeric/int128.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "cityhash/city.h"
#include "arolla/util/types.h"
namespace arolla {
namespace {
// Per-process seed mixed into every FingerprintHasher. absl::Hash is
// salted per process, so this value — computed once and cached — differs
// between runs, keeping fingerprints process-local.
uint32_t RuntimeSeed() {
  static const uint32_t seed = absl::Hash<int>{}(501816262);
  return seed;
}
}
// Renders the 128-bit fingerprint as a 32-character, zero-padded,
// lowercase hexadecimal string.
std::string Fingerprint::AsString() const {
  return absl::StrFormat("%032x", value);
}
// Hash value for exposing the fingerprint to Python. Derived via
// absl::Hash, which is salted per process, so it is stable only within a
// single process run.
signed_size_t Fingerprint::PythonHash() const {
  return absl::Hash<Fingerprint>()(*this);
}
// Streams the fingerprint in the same 32-digit hex form as AsString().
std::ostream& operator<<(std::ostream& ostream,
                         const Fingerprint& fingerprint) {
  return ostream << absl::StreamFormat("%032x", fingerprint.value);
}
// Returns a fingerprint made of 128 random bits. A fresh absl::BitGen is
// constructed per call, so consecutive calls yield independent values.
Fingerprint RandomFingerprint() {
  absl::BitGen bitgen;
  return Fingerprint{absl::MakeUint128(absl::Uniform<uint64_t>(bitgen),
                                       absl::Uniform<uint64_t>(bitgen))};
}
// Initializes the 128-bit hash state with fixed constants, then mixes in
// the per-process runtime seed and the caller-supplied salt so hashers
// with different salts produce unrelated fingerprints.
FingerprintHasher::FingerprintHasher(absl::string_view salt)
    : state_{3102879407, 2758948377}
{
  Combine(RuntimeSeed(), salt);
}
// Consumes the hasher (rvalue-qualified) and packs the state pair into a
// Fingerprint; note state_.second supplies the high 64 bits.
Fingerprint FingerprintHasher::Finish() && {
  return Fingerprint{absl::MakeUint128(state_.second, state_.first)};
}
// Mixes `size` raw bytes starting at `data` into the hash state using
// CityHash128 seeded with the current state, chaining successive calls.
void FingerprintHasher::CombineRawBytes(const void* data, size_t size) {
  state_ = cityhash::CityHash128WithSeed(
      static_cast<const char*>(data), size, state_);
}
} | #include "arolla/util/fingerprint.h"
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "arolla/util/struct_field.h"
namespace arolla {
namespace {
static_assert(
std::is_trivially_constructible_v<Fingerprint>,
"Make sure that fingerprint is trivially constructed, so that adding it to "
"a struct does not slow down the struct's initialization time.");
struct A {};
static_assert(!std::is_default_constructible_v<FingerprintHasherTraits<A>>);
struct AWithFingerPrintMethod {
void ArollaFingerprint(FingerprintHasher* hasher) const {
hasher->Combine(19);
}
};
struct AWithStructFields {
int a;
double b;
constexpr static auto ArollaStructFields() {
using CppType = AWithStructFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(b),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
// Test helper: fingerprints `values` under a fixed salt so results are
// comparable across calls within the same process.
template <typename... Ts>
Fingerprint MakeDummyFingerprint(const Ts&... values) {
  return FingerprintHasher("dummy-salt").Combine(values...).Finish();
}
// A value-initialized Fingerprint must render as 32 zero hex digits.
TEST(FingerprintTest, Empty) {
  Fingerprint fgpt{};
  EXPECT_EQ(fgpt.AsString(), "00000000000000000000000000000000");
}
// 1024 random fingerprints must be pairwise distinct (collisions in a
// 128-bit space at this sample size would indicate a broken generator).
TEST(FingerprintTest, RandomFingerprint) {
  constexpr int N = 1024;
  absl::flat_hash_set<Fingerprint> set;
  set.reserve(N);
  for (int i = 0; i < N; ++i) {
    set.insert(RandomFingerprint());
  }
  EXPECT_EQ(set.size(), N);
}
TEST(FingerprintTest, AWithFingerPrintMethod) {
EXPECT_EQ(MakeDummyFingerprint(AWithFingerPrintMethod()),
MakeDummyFingerprint(19));
}
TEST(FingerprintTest, AWithStructFields) {
EXPECT_EQ(MakeDummyFingerprint(AWithStructFields{.a = 5, .b = 7.}),
MakeDummyFingerprint(5, 7.));
}
TEST(FingerprintTest, TestPrimitives) {
EXPECT_NE(MakeDummyFingerprint(5), MakeDummyFingerprint(6));
EXPECT_NE(MakeDummyFingerprint<std::string>("5"),
MakeDummyFingerprint<std::string>("6"));
}
TEST(FingerprintTest, FloatingPointZero) {
EXPECT_NE(MakeDummyFingerprint(0.0).PythonHash(),
MakeDummyFingerprint(-0.0).PythonHash());
EXPECT_NE(MakeDummyFingerprint(0.f).PythonHash(),
MakeDummyFingerprint(-0.f).PythonHash());
}
TEST(FingerprintTest, FloatingPointNAN) {
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<float>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<float>::quiet_NaN())
.PythonHash());
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<double>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<double>::quiet_NaN())
.PythonHash());
}
TEST(FingerprintTest, PythonHash) {
EXPECT_EQ(MakeDummyFingerprint(4).PythonHash(),
MakeDummyFingerprint(4).PythonHash());
EXPECT_NE(MakeDummyFingerprint(5).PythonHash(),
MakeDummyFingerprint(6).PythonHash());
}
// operator< must be a strict ordering: 27 < 37, and never x < x.
TEST(FingerprintTest, Less) {
  EXPECT_LT(Fingerprint{27}, Fingerprint{37});
  EXPECT_FALSE(Fingerprint{27} < Fingerprint{27});
}
// CombineRawBytes must be deterministic for equal input and sensitive to
// input content for unequal input.
TEST(FingerprintTest, CombineRawBytes) {
  {
    // Same bytes, same salt -> same fingerprint.
    FingerprintHasher h1("dummy-salt");
    FingerprintHasher h2("dummy-salt");
    h1.CombineRawBytes("foobar", 6);
    h2.CombineRawBytes("foobar", 6);
    EXPECT_EQ(std::move(h1).Finish(), std::move(h2).Finish());
  }
  {
    // Different bytes of equal length -> different fingerprint.
    FingerprintHasher h1("dummy-salt");
    FingerprintHasher h2("dummy-salt");
    h1.CombineRawBytes("foobar", 6);
    h2.CombineRawBytes("barfoo", 6);
    EXPECT_NE(std::move(h1).Finish(), std::move(h2).Finish());
  }
}
class Circle {
public:
Circle(int x, int y, int r) : center_(x, y), radius_(r) {
FingerprintHasher hasher("arolla::TestCircle");
hasher.Combine(center_.first, center_.second, radius_);
fingerprint_ = std::move(hasher).Finish();
}
const Fingerprint& fingerprint() { return fingerprint_; }
private:
std::pair<int, int> center_;
int radius_;
Fingerprint fingerprint_;
};
TEST(FingerprintTest, UserDefined) {
EXPECT_NE(Circle(0, 0, 1).fingerprint(), Circle(0, 0, 2).fingerprint());
EXPECT_NE(Circle(1, 1, 1).fingerprint(), Circle(0, 0, 1).fingerprint());
}
TEST(FingerprintTest, HasArollaFingerprintMethodRegression) {
struct OverloadedType {
int ArollaFingerprint() const { return 0; }
void ArollaFingerprint(FingerprintHasher*) const {}
};
EXPECT_TRUE(
fingerprint_impl::HasArollaFingerprintMethod<OverloadedType>::value);
struct WrongType {
int ArollaFingerprint() const { return 0; }
};
EXPECT_FALSE(fingerprint_impl::HasArollaFingerprintMethod<WrongType>::value);
}
}
} | signed_size_t Fingerprint::PythonHash() const {
return absl::Hash<Fingerprint>()(*this);
} | TEST(FingerprintTest, FloatingPointZero) {
EXPECT_NE(MakeDummyFingerprint(0.0).PythonHash(),
MakeDummyFingerprint(-0.0).PythonHash());
EXPECT_NE(MakeDummyFingerprint(0.f).PythonHash(),
MakeDummyFingerprint(-0.f).PythonHash());
}
TEST(FingerprintTest, FloatingPointNAN) {
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<float>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<float>::quiet_NaN())
.PythonHash());
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<double>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<double>::quiet_NaN())
.PythonHash());
}
TEST(FingerprintTest, PythonHash) {
EXPECT_EQ(MakeDummyFingerprint(4).PythonHash(),
MakeDummyFingerprint(4).PythonHash());
EXPECT_NE(MakeDummyFingerprint(5).PythonHash(),
MakeDummyFingerprint(6).PythonHash());
} |
#include "absl/time/time.h"
#if defined(_MSC_VER)
#include <winsock2.h>
#endif
#include <cstring>
#include <ctime>
#include <limits>
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
namespace cctz = absl::time_internal::cctz;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
// The Unix epoch as a cctz time_point with seconds granularity; the basis
// for converting between absl::Time reps and cctz time points below.
inline cctz::time_point<cctz::seconds> unix_epoch() {
  return std::chrono::time_point_cast<cctz::seconds>(
      std::chrono::system_clock::from_time_t(0));
}
// Floors d (toward negative infinity) in units of `unit`. IDivDuration
// truncates toward zero, so a negative quotient with a nonzero (negative)
// remainder must be decremented by one. The int64_t::min() check avoids
// overflowing q - 1 when the division already saturated.
inline int64_t FloorToUnit(absl::Duration d, absl::Duration unit) {
  absl::Duration rem;
  int64_t q = absl::IDivDuration(d, unit, &rem);
  return (q > 0 || rem >= ZeroDuration() ||
          q == std::numeric_limits<int64_t>::min())
             ? q
             : q - 1;
}
ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
// Breakdown reported for absl::InfiniteFuture(): the latest representable
// civil instant with an infinite subsecond, in a fictitious fixed "-00"
// zone.
inline absl::Time::Breakdown InfiniteFutureBreakdown() {
  absl::Time::Breakdown result;
  // December 31 of the largest representable year, 23:59:59.
  result.year = std::numeric_limits<int64_t>::max();
  result.month = 12;
  result.day = 31;
  result.hour = 23;
  result.minute = 59;
  result.second = 59;
  result.subsecond = absl::InfiniteDuration();
  // Thursday, day-of-year 365, zero UTC offset, not DST.
  result.weekday = 4;
  result.yearday = 365;
  result.offset = 0;
  result.is_dst = false;
  result.zone_abbr = "-00";
  return result;
}
// Breakdown reported for absl::InfinitePast(): the earliest representable
// civil instant with a negative-infinite subsecond, in the fixed "-00"
// zone.
inline absl::Time::Breakdown InfinitePastBreakdown() {
  Time::Breakdown result;
  // January 1 of the smallest representable year, 00:00:00.
  result.year = std::numeric_limits<int64_t>::min();
  result.month = 1;
  result.day = 1;
  result.hour = 0;
  result.minute = 0;
  result.second = 0;
  result.subsecond = -absl::InfiniteDuration();
  // Sunday, day-of-year 1, zero UTC offset, not DST.
  result.weekday = 7;
  result.yearday = 1;
  result.offset = 0;
  result.is_dst = false;
  result.zone_abbr = "-00";
  return result;
}
ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// CivilInfo reported for absl::InfiniteFuture(): maximal civil second,
// infinite subsecond, fixed "-00" zone.
inline absl::TimeZone::CivilInfo InfiniteFutureCivilInfo() {
  TimeZone::CivilInfo info;
  info.cs = CivilSecond::max();
  info.subsecond = InfiniteDuration();
  info.offset = 0;
  info.is_dst = false;
  info.zone_abbr = "-00";
  return info;
}
// CivilInfo reported for absl::InfinitePast(): minimal civil second,
// negative-infinite subsecond, fixed "-00" zone.
inline absl::TimeZone::CivilInfo InfinitePastCivilInfo() {
  TimeZone::CivilInfo info;
  info.cs = CivilSecond::min();
  info.subsecond = -InfiniteDuration();
  info.offset = 0;
  info.is_dst = false;
  info.zone_abbr = "-00";
  return info;
}
ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
// TimeConversion for a civil time far enough in the future to saturate:
// every candidate time is InfiniteFuture() and the result is flagged as
// normalized.
inline absl::TimeConversion InfiniteFutureTimeConversion() {
  absl::TimeConversion conv;
  conv.pre = conv.trans = conv.post = absl::InfiniteFuture();
  conv.kind = absl::TimeConversion::UNIQUE;
  conv.normalized = true;
  return conv;
}
inline TimeConversion InfinitePastTimeConversion() {
absl::TimeConversion tc;
tc.pre = tc.trans = tc.post = absl::InfinitePast();
tc.kind = absl::TimeConversion::UNIQUE;
tc.normalized = true;
return tc;
}
ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// Converts a cctz lookup result (`sec`, for civil second `cs` in zone
// `tz`) to an absl::Time, saturating to InfiniteFuture()/InfinitePast()
// when the requested civil second lies beyond the representable cctz
// time_point range. If saturation occurs and `normalized` is non-null,
// *normalized is set to true.
Time MakeTimeWithOverflow(const cctz::time_point<cctz::seconds>& sec,
                          const cctz::civil_second& cs,
                          const cctz::time_zone& tz,
                          bool* normalized = nullptr) {
  const auto max = cctz::time_point<cctz::seconds>::max();
  const auto min = cctz::time_point<cctz::seconds>::min();
  if (sec == max) {
    // `sec` pinned at the cctz maximum: if the requested civil time is
    // later than what that maximum maps to, the answer overflowed.
    const auto al = tz.lookup(max);
    if (cs > al.cs) {
      if (normalized) *normalized = true;
      return absl::InfiniteFuture();
    }
  }
  if (sec == min) {
    // Symmetric underflow check at the cctz minimum.
    const auto al = tz.lookup(min);
    if (cs < al.cs) {
      if (normalized) *normalized = true;
      return absl::InfinitePast();
    }
  }
  // In range: whole seconds since the Unix epoch become the high word of
  // the absl::Time rep (subseconds are zero here).
  const auto hi = (sec - unix_epoch()).count();
  return time_internal::FromUnixDuration(time_internal::MakeDuration(hi));
}
// Maps a cctz weekday onto the numbering used by Time::Breakdown
// (Monday = 1 ... Sunday = 7).
inline int MapWeekday(const cctz::weekday& wd) {
  switch (wd) {
    case cctz::weekday::monday: return 1;
    case cctz::weekday::tuesday: return 2;
    case cctz::weekday::wednesday: return 3;
    case cctz::weekday::thursday: return 4;
    case cctz::weekday::friday: return 5;
    case cctz::weekday::saturday: return 6;
    case cctz::weekday::sunday: return 7;
  }
  // Unreachable for valid enumerators; satisfies -Wreturn-type.
  return 1;
}
// Shared implementation of TimeZone::NextTransition/PrevTransition:
// invokes the given cctz member function (next_transition or
// prev_transition) at time `t` and, on success, copies the civil
// transition endpoints into `trans`. Returns false if no transition was
// found.
bool FindTransition(const cctz::time_zone& tz,
                    bool (cctz::time_zone::*find_transition)(
                        const cctz::time_point<cctz::seconds>& tp,
                        cctz::time_zone::civil_transition* trans) const,
                    Time t, TimeZone::CivilTransition* trans) {
  const auto tp = unix_epoch() + cctz::seconds(ToUnixSeconds(t));
  cctz::time_zone::civil_transition tr;
  if (!(tz.*find_transition)(tp, &tr)) return false;
  trans->from = CivilSecond(tr.from);
  trans->to = CivilSecond(tr.to);
  return true;
}
}
ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
// Deprecated civil-time breakdown of this Time in zone `tz` (wrapped in
// the deprecation-warning suppression above). Infinite endpoints get the
// canned breakdowns; otherwise the seconds part of the rep is looked up
// through cctz and the subsecond part is carried over unchanged.
absl::Time::Breakdown Time::In(absl::TimeZone tz) const {
  if (*this == absl::InfiniteFuture()) return InfiniteFutureBreakdown();
  if (*this == absl::InfinitePast()) return InfinitePastBreakdown();
  // Whole seconds go through the zone lookup; the sub-second residue in
  // the low rep word is attached afterwards.
  const auto tp = unix_epoch() + cctz::seconds(time_internal::GetRepHi(rep_));
  const auto al = cctz::time_zone(tz).lookup(tp);
  const auto cs = al.cs;
  const auto cd = cctz::civil_day(cs);
  absl::Time::Breakdown bd;
  bd.year = cs.year();
  bd.month = cs.month();
  bd.day = cs.day();
  bd.hour = cs.hour();
  bd.minute = cs.minute();
  bd.second = cs.second();
  bd.subsecond = time_internal::MakeDuration(0, time_internal::GetRepLo(rep_));
  bd.weekday = MapWeekday(cctz::get_weekday(cd));
  bd.yearday = cctz::get_yearday(cd);
  bd.offset = al.offset;
  bd.is_dst = al.is_dst;
  bd.zone_abbr = al.abbr;
  return bd;
}
ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// Converts an ICU-style UDate (fractional milliseconds since the Unix
// epoch) to an absl::Time.
absl::Time FromUDate(double udate) {
  return time_internal::FromUnixDuration(absl::Milliseconds(udate));
}
// Converts a count of 100-nanosecond ticks since the universal epoch
// (Jan 1, year 1) to an absl::Time.
absl::Time FromUniversal(int64_t universal) {
  return absl::UniversalEpoch() + 100 * absl::Nanoseconds(universal);
}
// Floor of t as nanoseconds since the Unix epoch.
int64_t ToUnixNanos(Time t) {
  // Fast path: non-negative seconds small enough (< 2^33 s) that the
  // nanosecond total fits an int64_t. The low rep word appears to count
  // quarter-nanosecond ticks — hence the division by 4 (consistent with
  // the /4000 in ToUnixMicros below).
  if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
      time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 33 == 0) {
    return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) *
            1000 * 1000 * 1000) +
           (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) / 4);
  }
  // Slow path: negative or large times need explicit flooring.
  return FloorToUnit(time_internal::ToUnixDuration(t), absl::Nanoseconds(1));
}
// Floor of t as microseconds since the Unix epoch.
int64_t ToUnixMicros(Time t) {
  // Fast path: non-negative seconds < 2^43 keep the microsecond total in
  // range; /4000 converts the quarter-nanosecond low word to microseconds.
  if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
      time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 43 == 0) {
    return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) *
            1000 * 1000) +
           (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) / 4000);
  }
  return FloorToUnit(time_internal::ToUnixDuration(t), absl::Microseconds(1));
}
// Floor of t as milliseconds since the Unix epoch.
int64_t ToUnixMillis(Time t) {
  // Fast path: non-negative seconds < 2^53 keep the millisecond total in
  // range; /(4000*1000) converts the low word to milliseconds.
  if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
      time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 53 == 0) {
    return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) * 1000) +
           (time_internal::GetRepLo(time_internal::ToUnixDuration(t)) /
            (4000 * 1000));
  }
  return FloorToUnit(time_internal::ToUnixDuration(t), absl::Milliseconds(1));
}
// Whole seconds since the Unix epoch: simply the high word of the rep
// (which already floors, since the low word is a non-negative residue).
int64_t ToUnixSeconds(Time t) {
  return time_internal::GetRepHi(time_internal::ToUnixDuration(t));
}
// time_t conversion via ToTimespec(), inheriting its saturation behavior
// at the time_t limits.
time_t ToTimeT(Time t) { return absl::ToTimespec(t).tv_sec; }
// ICU-style UDate: fractional milliseconds since the Unix epoch.
double ToUDate(Time t) {
  return absl::FDivDuration(time_internal::ToUnixDuration(t),
                            absl::Milliseconds(1));
}
// Floor of t as 100-nanosecond ticks since the universal epoch
// (inverse of FromUniversal).
int64_t ToUniversal(absl::Time t) {
  return absl::FloorToUnit(t - absl::UniversalEpoch(), absl::Nanoseconds(100));
}
// Converts a POSIX timespec to absl::Time.
absl::Time TimeFromTimespec(timespec ts) {
  return time_internal::FromUnixDuration(absl::DurationFromTimespec(ts));
}
// Converts a POSIX timeval to absl::Time.
absl::Time TimeFromTimeval(timeval tv) {
  return time_internal::FromUnixDuration(absl::DurationFromTimeval(tv));
}
// Converts t to a POSIX timespec, saturating at the time_t limits for
// infinite or out-of-range times.
timespec ToTimespec(Time t) {
  timespec ts;
  absl::Duration d = time_internal::ToUnixDuration(t);
  if (!time_internal::IsInfiniteDuration(d)) {
    ts.tv_sec = static_cast<decltype(ts.tv_sec)>(time_internal::GetRepHi(d));
    // Round-trip check: if the cast preserved the value, the seconds fit
    // in time_t, and the low word (quarter-ns ticks) becomes tv_nsec.
    if (ts.tv_sec == time_internal::GetRepHi(d)) {
      ts.tv_nsec = time_internal::GetRepLo(d) / 4;
      return ts;
    }
  }
  // Out of range (or infinite): clamp to the appropriate extreme.
  if (d >= absl::ZeroDuration()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = 1000 * 1000 * 1000 - 1;
  } else {
    ts.tv_sec = std::numeric_limits<time_t>::min();
    ts.tv_nsec = 0;
  }
  return ts;
}
// Converts t to a POSIX timeval via ToTimespec(), saturating when the
// seconds do not fit the (possibly narrower) timeval::tv_sec type.
timeval ToTimeval(Time t) {
  timeval tv;
  timespec ts = absl::ToTimespec(t);
  tv.tv_sec = static_cast<decltype(tv.tv_sec)>(ts.tv_sec);
  if (tv.tv_sec != ts.tv_sec) {
    // tv_sec overflowed: clamp to the extreme matching the sign.
    if (ts.tv_sec < 0) {
      tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
      tv.tv_usec = 0;
    } else {
      tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::max();
      tv.tv_usec = 1000 * 1000 - 1;
    }
    return tv;
  }
  tv.tv_usec = static_cast<int>(ts.tv_nsec / 1000);
  return tv;
}
// Converts a std::chrono system_clock time_point (whose epoch is taken as
// from_time_t(0)) to absl::Time.
Time FromChrono(const std::chrono::system_clock::time_point& tp) {
  return time_internal::FromUnixDuration(time_internal::FromChrono(
      tp - std::chrono::system_clock::from_time_t(0)));
}
// Converts absl::Time to a std::chrono system_clock time_point, flooring
// negative times to the clock's duration granularity so the result never
// rounds toward the future.
std::chrono::system_clock::time_point ToChronoTime(absl::Time t) {
  using D = std::chrono::system_clock::duration;
  auto d = time_internal::ToUnixDuration(t);
  if (d < ZeroDuration()) d = Floor(d, FromChrono(D{1}));
  return std::chrono::system_clock::from_time_t(0) +
         time_internal::ToChronoDuration<D>(d);
}
// Civil-time information for absolute time `t` in this zone. Infinite
// endpoints return canned CivilInfo values; otherwise the whole-seconds
// part is looked up through cctz and the sub-second residue is carried
// over unchanged.
absl::TimeZone::CivilInfo TimeZone::At(Time t) const {
  if (t == absl::InfiniteFuture()) return InfiniteFutureCivilInfo();
  if (t == absl::InfinitePast()) return InfinitePastCivilInfo();
  const auto ud = time_internal::ToUnixDuration(t);
  const auto tp = unix_epoch() + cctz::seconds(time_internal::GetRepHi(ud));
  const auto al = cz_.lookup(tp);
  TimeZone::CivilInfo ci;
  ci.cs = CivilSecond(al.cs);
  ci.subsecond = time_internal::MakeDuration(0, time_internal::GetRepLo(ud));
  ci.offset = al.offset;
  ci.is_dst = al.is_dst;
  ci.zone_abbr = al.abbr;
  return ci;
}
// Absolute-time information for civil second `ct` in this zone. The cctz
// lookup kind (unique / skipped by a spring-forward gap / repeated by a
// fall-back overlap) is translated to the absl enum, and the three
// candidate instants are converted with overflow saturation.
absl::TimeZone::TimeInfo TimeZone::At(CivilSecond ct) const {
  const cctz::civil_second cs(ct);
  const auto cl = cz_.lookup(cs);
  TimeZone::TimeInfo ti;
  switch (cl.kind) {
    case cctz::time_zone::civil_lookup::UNIQUE:
      ti.kind = TimeZone::TimeInfo::UNIQUE;
      break;
    case cctz::time_zone::civil_lookup::SKIPPED:
      ti.kind = TimeZone::TimeInfo::SKIPPED;
      break;
    case cctz::time_zone::civil_lookup::REPEATED:
      ti.kind = TimeZone::TimeInfo::REPEATED;
      break;
  }
  ti.pre = MakeTimeWithOverflow(cl.pre, cs, cz_);
  ti.trans = MakeTimeWithOverflow(cl.trans, cs, cz_);
  ti.post = MakeTimeWithOverflow(cl.post, cs, cz_);
  return ti;
}
// Finds the first zone-rule transition strictly after `t`; returns false
// if there is none.
bool TimeZone::NextTransition(Time t, CivilTransition* trans) const {
  return FindTransition(cz_, &cctz::time_zone::next_transition, t, trans);
}
// Finds the most recent zone-rule transition before `t`; returns false if
// there is none.
bool TimeZone::PrevTransition(Time t, CivilTransition* trans) const {
  return FindTransition(cz_, &cctz::time_zone::prev_transition, t, trans);
}
ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
// Deprecated civil-to-absolute conversion (wrapped in the
// deprecation-warning suppression above). Years beyond +/-3e11 saturate
// to the infinite conversions; otherwise the fields are normalized into a
// CivilSecond, looked up in `tz`, and the lookup kind is translated.
absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
                                     int min, int sec, TimeZone tz) {
  if (year > 300000000000) return InfiniteFutureTimeConversion();
  if (year < -300000000000) return InfinitePastTimeConversion();
  const CivilSecond cs(year, mon, day, hour, min, sec);
  const auto ti = tz.At(cs);
  TimeConversion tc;
  tc.pre = ti.pre;
  tc.trans = ti.trans;
  tc.post = ti.post;
  switch (ti.kind) {
    case TimeZone::TimeInfo::UNIQUE:
      tc.kind = TimeConversion::UNIQUE;
      break;
    case TimeZone::TimeInfo::SKIPPED:
      tc.kind = TimeConversion::SKIPPED;
      break;
    case TimeZone::TimeInfo::REPEATED:
      tc.kind = TimeConversion::REPEATED;
      break;
  }
  // `normalized` reports whether CivilSecond had to canonicalize any
  // out-of-range field (e.g. month 13, second 61).
  tc.normalized = false;
  if (year != cs.year() || mon != cs.month() || day != cs.day() ||
      hour != cs.hour() || min != cs.minute() || sec != cs.second()) {
    tc.normalized = true;
  }
  return tc;
}
ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// Converts a struct tm in zone `tz` to absl::Time. Saturates for years
// beyond +/-3e11. tm_isdst selects which side of a skipped/repeated civil
// time to prefer (0 = standard time -> post, nonzero -> pre).
absl::Time FromTM(const struct tm& tm, absl::TimeZone tz) {
  civil_year_t tm_year = tm.tm_year;
  if (tm_year > 300000000000ll) return InfiniteFuture();
  if (tm_year < -300000000000ll) return InfinitePast();
  int tm_mon = tm.tm_mon;
  // Guard: tm_mon at INT_MAX would overflow in `tm_mon + 1` below, so
  // borrow a year instead.
  if (tm_mon == std::numeric_limits<int>::max()) {
    tm_mon -= 12;
    tm_year += 1;
  }
  const auto ti = tz.At(CivilSecond(tm_year + 1900, tm_mon + 1, tm.tm_mday,
                                    tm.tm_hour, tm.tm_min, tm.tm_sec));
  return tm.tm_isdst == 0 ? ti.post : ti.pre;
}
// Converts t in zone `tz` to a struct tm, clamping the year to the int
// range of tm_year (which is offset by 1900) and filling the derived
// weekday/yearday/DST fields.
struct tm ToTM(absl::Time t, absl::TimeZone tz) {
  struct tm tm = {};
  const auto ci = tz.At(t);
  const auto& cs = ci.cs;
  tm.tm_sec = cs.second();
  tm.tm_min = cs.minute();
  tm.tm_hour = cs.hour();
  tm.tm_mday = cs.day();
  tm.tm_mon = cs.month() - 1;  // tm months are 0-based
  // tm_year is years-since-1900 in an int; clamp out-of-range civil years.
  if (cs.year() < std::numeric_limits<int>::min() + 1900) {
    tm.tm_year = std::numeric_limits<int>::min();
  } else if (cs.year() > std::numeric_limits<int>::max()) {
    tm.tm_year = std::numeric_limits<int>::max() - 1900;
  } else {
    tm.tm_year = static_cast<int>(cs.year() - 1900);
  }
  // tm weekdays are 0-based starting Sunday.
  switch (GetWeekday(cs)) {
    case Weekday::sunday:
      tm.tm_wday = 0;
      break;
    case Weekday::monday:
      tm.tm_wday = 1;
      break;
    case Weekday::tuesday:
      tm.tm_wday = 2;
      break;
    case Weekday::wednesday:
      tm.tm_wday = 3;
      break;
    case Weekday::thursday:
      tm.tm_wday = 4;
      break;
    case Weekday::friday:
      tm.tm_wday = 5;
      break;
    case Weekday::saturday:
      tm.tm_wday = 6;
      break;
  }
  tm.tm_yday = GetYearDay(cs) - 1;  // tm_yday is 0-based
  tm.tm_isdst = ci.is_dst ? 1 : 0;
  return tm;
}
ABSL_NAMESPACE_END
} | #include "absl/time/time.h"
#include <cstdint>
#include <ios>
#include "absl/time/civil_time.h"
#if defined(_MSC_VER)
#include <winsock2.h>
#endif
#include <chrono>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <limits>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/internal/test_util.h"
namespace {
#if defined(GTEST_USES_SIMPLE_RE) && GTEST_USES_SIMPLE_RE
const char kZoneAbbrRE[] = ".*";
#else
const char kZoneAbbrRE[] = "[A-Za-z]{3,4}|[-+][0-9]{2}([0-9]{2})?";
#endif
#define EXPECT_CIVIL_INFO(ci, y, m, d, h, min, s, off, isdst) \
do { \
EXPECT_EQ(y, ci.cs.year()); \
EXPECT_EQ(m, ci.cs.month()); \
EXPECT_EQ(d, ci.cs.day()); \
EXPECT_EQ(h, ci.cs.hour()); \
EXPECT_EQ(min, ci.cs.minute()); \
EXPECT_EQ(s, ci.cs.second()); \
EXPECT_EQ(off, ci.offset); \
EXPECT_EQ(isdst, ci.is_dst); \
EXPECT_THAT(ci.zone_abbr, testing::MatchesRegex(kZoneAbbrRE)); \
} while (0)
// gMock matcher: equality on both timespec fields, with a readable
// expected-vs-actual message on mismatch.
MATCHER_P(TimespecMatcher, ts, "") {
  if (ts.tv_sec == arg.tv_sec && ts.tv_nsec == arg.tv_nsec) return true;
  *result_listener << "expected: {" << ts.tv_sec << ", " << ts.tv_nsec << "} ";
  *result_listener << "actual: {" << arg.tv_sec << ", " << arg.tv_nsec << "}";
  return false;
}
// gMock matcher: equality on both timeval fields, with a readable
// expected-vs-actual message on mismatch.
MATCHER_P(TimevalMatcher, tv, "") {
  if (tv.tv_sec == arg.tv_sec && tv.tv_usec == arg.tv_usec) return true;
  *result_listener << "expected: {" << tv.tv_sec << ", " << tv.tv_usec << "} ";
  *result_listener << "actual: {" << arg.tv_sec << ", " << arg.tv_usec << "}";
  return false;
}
// The named constructors must all be usable in constant expressions.
TEST(Time, ConstExpr) {
  constexpr absl::Time t0 = absl::UnixEpoch();
  static_assert(t0 == absl::UnixEpoch(), "UnixEpoch");
  constexpr absl::Time t1 = absl::InfiniteFuture();
  static_assert(t1 != absl::UnixEpoch(), "InfiniteFuture");
  constexpr absl::Time t2 = absl::InfinitePast();
  static_assert(t2 != absl::UnixEpoch(), "InfinitePast");
  constexpr absl::Time t3 = absl::FromUnixNanos(0);
  static_assert(t3 == absl::UnixEpoch(), "FromUnixNanos");
  constexpr absl::Time t4 = absl::FromUnixMicros(0);
  static_assert(t4 == absl::UnixEpoch(), "FromUnixMicros");
  constexpr absl::Time t5 = absl::FromUnixMillis(0);
  static_assert(t5 == absl::UnixEpoch(), "FromUnixMillis");
  constexpr absl::Time t6 = absl::FromUnixSeconds(0);
  static_assert(t6 == absl::UnixEpoch(), "FromUnixSeconds");
  constexpr absl::Time t7 = absl::FromTimeT(0);
  static_assert(t7 == absl::UnixEpoch(), "FromTimeT");
}
// absl::Time must behave as a regular value type: copy construction and
// assignment preserve equality.
TEST(Time, ValueSemantics) {
  absl::Time a;
  absl::Time b = a;
  EXPECT_EQ(a, b);
  absl::Time c(a);
  EXPECT_EQ(a, b);
  EXPECT_EQ(a, c);
  EXPECT_EQ(b, c);
  b = c;
  EXPECT_EQ(a, b);
  EXPECT_EQ(a, c);
  EXPECT_EQ(b, c);
}
// UnixEpoch() in UTC must be exactly 1970-01-01 00:00:00, a Thursday,
// with no sub-second component.
TEST(Time, UnixEpoch) {
  const auto ci = absl::UTCTimeZone().At(absl::UnixEpoch());
  EXPECT_EQ(absl::CivilSecond(1970, 1, 1, 0, 0, 0), ci.cs);
  EXPECT_EQ(absl::ZeroDuration(), ci.subsecond);
  EXPECT_EQ(absl::Weekday::thursday, absl::GetWeekday(ci.cs));
}
TEST(Time, Breakdown) {
absl::TimeZone tz = absl::time_internal::LoadTimeZone("America/New_York");
absl::Time t = absl::UnixEpoch();
auto ci = tz.At(t);
EXPECT_CIVIL_INFO(ci, 1969, 12, 31, 19, 0, 0, -18000, false);
EXPECT_EQ(absl::ZeroDuration(), ci.subsecond);
EXPECT_EQ(absl::Weekday::wednesday, absl::GetWeekday(ci.cs));
t -= absl::Nanoseconds(1);
ci = tz.At(t);
EXPECT_CIVIL_INFO(ci, 1969, 12, 31, 18, 59, 59, -18000, false);
EXPECT_EQ(absl::Nanoseconds(999999999), ci.subsecond);
EXPECT_EQ(absl::Weekday::wednesday, absl::GetWeekday(ci.cs));
t += absl::Hours(24) * 2735;
t += absl::Hours(18) + absl::Minutes(30) + absl::Seconds(15) +
absl::Nanoseconds(9);
ci = tz.At(t);
EXPECT_CIVIL_INFO(ci, 1977, 6, 28, 14, 30, 15, -14400, true);
EXPECT_EQ(8, ci.subsecond / absl::Nanoseconds(1));
EXPECT_EQ(absl::Weekday::tuesday, absl::GetWeekday(ci.cs));
}
TEST(Time, AdditiveOperators) {
const absl::Duration d = absl::Nanoseconds(1);
const absl::Time t0;
const absl::Time t1 = t0 + d;
EXPECT_EQ(d, t1 - t0);
EXPECT_EQ(-d, t0 - t1);
EXPECT_EQ(t0, t1 - d);
absl::Time t(t0);
EXPECT_EQ(t0, t);
t += d;
EXPECT_EQ(t0 + d, t);
EXPECT_EQ(d, t - t0);
t -= d;
EXPECT_EQ(t0, t);
t = absl::UnixEpoch();
t += absl::Milliseconds(500);
EXPECT_EQ(absl::UnixEpoch() + absl::Milliseconds(500), t);
t += absl::Milliseconds(600);
EXPECT_EQ(absl::UnixEpoch() + absl::Milliseconds(1100), t);
t -= absl::Milliseconds(600);
EXPECT_EQ(absl::UnixEpoch() + absl::Milliseconds(500), t);
t -= absl::Milliseconds(500);
EXPECT_EQ(absl::UnixEpoch(), t);
}
TEST(Time, RelationalOperators) {
constexpr absl::Time t1 = absl::FromUnixNanos(0);
constexpr absl::Time t2 = absl::FromUnixNanos(1);
constexpr absl::Time t3 = absl::FromUnixNanos(2);
static_assert(absl::UnixEpoch() == t1, "");
static_assert(t1 == t1, "");
static_assert(t2 == t2, "");
static_assert(t3 == t3, "");
static_assert(t1 < t2, "");
static_assert(t2 < t3, "");
static_assert(t1 < t3, "");
static_assert(t1 <= t1, "");
static_assert(t1 <= t2, "");
static_assert(t2 <= t2, "");
static_assert(t2 <= t3, "");
static_assert(t3 <= t3, "");
static_assert(t1 <= t3, "");
static_assert(t2 > t1, "");
static_assert(t3 > t2, "");
static_assert(t3 > t1, "");
static_assert(t2 >= t2, "");
static_assert(t2 >= t1, "");
static_assert(t3 >= t3, "");
static_assert(t3 >= t2, "");
static_assert(t1 >= t1, "");
static_assert(t3 >= t1, "");
}
TEST(Time, Infinity) {
constexpr absl::Time ifuture = absl::InfiniteFuture();
constexpr absl::Time ipast = absl::InfinitePast();
static_assert(ifuture == ifuture, "");
static_assert(ipast == ipast, "");
static_assert(ipast < ifuture, "");
static_assert(ifuture > ipast, "");
EXPECT_EQ(ifuture, ifuture + absl::Seconds(1));
EXPECT_EQ(ifuture, ifuture - absl::Seconds(1));
EXPECT_EQ(ipast, ipast + absl::Seconds(1));
EXPECT_EQ(ipast, ipast - absl::Seconds(1));
EXPECT_EQ(absl::InfiniteDuration(), ifuture - ifuture);
EXPECT_EQ(absl::InfiniteDuration(), ifuture - ipast);
EXPECT_EQ(-absl::InfiniteDuration(), ipast - ifuture);
EXPECT_EQ(-absl::InfiniteDuration(), ipast - ipast);
constexpr absl::Time t = absl::UnixEpoch();
static_assert(t < ifuture, "");
static_assert(t > ipast, "");
EXPECT_EQ(ifuture, t + absl::InfiniteDuration());
EXPECT_EQ(ipast, t - absl::InfiniteDuration());
}
TEST(Time, FloorConversion) {
#define TEST_FLOOR_CONVERSION(TO, FROM) \
EXPECT_EQ(1, TO(FROM(1001))); \
EXPECT_EQ(1, TO(FROM(1000))); \
EXPECT_EQ(0, TO(FROM(999))); \
EXPECT_EQ(0, TO(FROM(1))); \
EXPECT_EQ(0, TO(FROM(0))); \
EXPECT_EQ(-1, TO(FROM(-1))); \
EXPECT_EQ(-1, TO(FROM(-999))); \
EXPECT_EQ(-1, TO(FROM(-1000))); \
EXPECT_EQ(-2, TO(FROM(-1001)));
TEST_FLOOR_CONVERSION(absl::ToUnixMicros, absl::FromUnixNanos);
TEST_FLOOR_CONVERSION(absl::ToUnixMillis, absl::FromUnixMicros);
TEST_FLOOR_CONVERSION(absl::ToUnixSeconds, absl::FromUnixMillis);
TEST_FLOOR_CONVERSION(absl::ToTimeT, absl::FromUnixMillis);
#undef TEST_FLOOR_CONVERSION
EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(3) / 2));
EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1)));
EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1) / 2));
EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::ZeroDuration()));
EXPECT_EQ(-1,
absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1) / 2));
EXPECT_EQ(-1, absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1)));
EXPECT_EQ(-2,
absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(3) / 2));
EXPECT_EQ(1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(101)));
EXPECT_EQ(1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(100)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(99)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(1)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::ZeroDuration()));
EXPECT_EQ(-1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-1)));
EXPECT_EQ(-1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-99)));
EXPECT_EQ(
-1, absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-100)));
EXPECT_EQ(
-2, absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-101)));
const struct {
absl::Time t;
timespec ts;
} to_ts[] = {
{absl::FromUnixSeconds(1) + absl::Nanoseconds(1), {1, 1}},
{absl::FromUnixSeconds(1) + absl::Nanoseconds(1) / 2, {1, 0}},
{absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
{absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
{absl::FromUnixSeconds(0) - absl::Nanoseconds(1) / 2, {-1, 999999999}},
{absl::FromUnixSeconds(0) - absl::Nanoseconds(1), {-1, 999999999}},
{absl::FromUnixSeconds(-1) + absl::Nanoseconds(1), {-1, 1}},
{absl::FromUnixSeconds(-1) + absl::Nanoseconds(1) / 2, {-1, 0}},
{absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
{absl::FromUnixSeconds(-1) - absl::Nanoseconds(1) / 2, {-2, 999999999}},
};
for (const auto& test : to_ts) {
EXPECT_THAT(absl::ToTimespec(test.t), TimespecMatcher(test.ts));
}
const struct {
timespec ts;
absl::Time t;
} from_ts[] = {
{{1, 1}, absl::FromUnixSeconds(1) + absl::Nanoseconds(1)},
{{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
{{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
{{0, -1}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
{{-1, 999999999}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
{{-1, 1}, absl::FromUnixSeconds(-1) + absl::Nanoseconds(1)},
{{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
{{-1, -1}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
{{-2, 999999999}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
};
for (const auto& test : from_ts) {
EXPECT_EQ(test.t, absl::TimeFromTimespec(test.ts));
}
const struct {
absl::Time t;
timeval tv;
} to_tv[] = {
{absl::FromUnixSeconds(1) + absl::Microseconds(1), {1, 1}},
{absl::FromUnixSeconds(1) + absl::Microseconds(1) / 2, {1, 0}},
{absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
{absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
{absl::FromUnixSeconds(0) - absl::Microseconds(1) / 2, {-1, 999999}},
{absl::FromUnixSeconds(0) - absl::Microseconds(1), {-1, 999999}},
{absl::FromUnixSeconds(-1) + absl::Microseconds(1), {-1, 1}},
{absl::FromUnixSeconds(-1) + absl::Microseconds(1) / 2, {-1, 0}},
{absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
{absl::FromUnixSeconds(-1) - absl::Microseconds(1) / 2, {-2, 999999}},
};
for (const auto& test : to_tv) {
EXPECT_THAT(absl::ToTimeval(test.t), TimevalMatcher(test.tv));
}
const struct {
timeval tv;
absl::Time t;
} from_tv[] = {
{{1, 1}, absl::FromUnixSeconds(1) + absl::Microseconds(1)},
{{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
{{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
{{0, -1}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
{{-1, 999999}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
{{-1, 1}, absl::FromUnixSeconds(-1) + absl::Microseconds(1)},
{{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
{{-1, -1}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
{{-2, 999999}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
};
for (const auto& test : from_tv) {
EXPECT_EQ(test.t, absl::TimeFromTimeval(test.tv));
}
const int64_t min_plus_1 = std::numeric_limits<int64_t>::min() + 1;
EXPECT_EQ(min_plus_1, absl::ToUnixSeconds(absl::FromUnixSeconds(min_plus_1)));
EXPECT_EQ(std::numeric_limits<int64_t>::min(),
absl::ToUnixSeconds(absl::FromUnixSeconds(min_plus_1) -
absl::Nanoseconds(1) / 2));
EXPECT_EQ(std::numeric_limits<int64_t>::max(),
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max()) +
absl::Nanoseconds(1) / 2));
EXPECT_EQ(std::numeric_limits<int64_t>::max(),
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max())));
EXPECT_EQ(std::numeric_limits<int64_t>::max() - 1,
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max()) -
absl::Nanoseconds(1) / 2));
}
// Round-trip test: converting a value into absl::Time (FROM) and back out
// through the matching accessor (TO) must reproduce the original value
// exactly.  Each interchange representation is probed at -1, 0, +1 and at a
// current-time sample of the appropriate granularity.
TEST(Time, RoundtripConversion) {
#define TEST_CONVERSION_ROUND_TRIP(SOURCE, FROM, TO, MATCHER) \
  EXPECT_THAT(TO(FROM(SOURCE)), MATCHER(SOURCE))
  // Unix nanoseconds.
  int64_t now_ns = absl::GetCurrentTimeNanos();
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_ns, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq)
      << now_ns;
  // Unix microseconds.
  int64_t now_us = absl::GetCurrentTimeNanos() / 1000;
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_us, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq)
      << now_us;
  // Unix milliseconds.
  int64_t now_ms = absl::GetCurrentTimeNanos() / 1000000;
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_ms, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq)
      << now_ms;
  // Unix seconds.
  int64_t now_s = std::time(nullptr);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_s, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq)
      << now_s;
  // time_t.
  time_t now_time_t = std::time(nullptr);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_time_t, absl::FromTimeT, absl::ToTimeT,
                             testing::Eq)
      << now_time_t;
  // timeval (microsecond granularity), including negative tv_sec.
  timeval tv;
  tv.tv_sec = -1;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = -1;
  tv.tv_usec = 999999;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 0;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 0;
  tv.tv_usec = 1;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  // timespec (nanosecond granularity), including negative tv_sec.
  timespec ts;
  ts.tv_sec = -1;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = -1;
  ts.tv_nsec = 999999999;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 0;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 0;
  ts.tv_nsec = 1;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 1;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  // UDate (floating-point milliseconds), including half-millisecond values
  // on both sides of zero.
  double now_ud = absl::GetCurrentTimeNanos() / 1000000;
  TEST_CONVERSION_ROUND_TRIP(-1.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(-0.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(0.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(1.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(now_ud, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq)
      << std::fixed << std::setprecision(17) << now_ud;
  // Universal time (100ns ticks).  719162 appears to be the day count from
  // the universal epoch to the Unix epoch -- TODO confirm against absl docs.
  int64_t now_uni = ((719162LL * (24 * 60 * 60)) * (1000 * 1000 * 10)) +
                    (absl::GetCurrentTimeNanos() / 100);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_uni, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq)
      << now_uni;
#undef TEST_CONVERSION_ROUND_TRIP
}
// Builds a std::chrono::system_clock::time_point that lies `d` after the
// Unix epoch (system_clock's from_time_t(0)).
template <typename Duration>
std::chrono::system_clock::time_point MakeChronoUnixTime(const Duration& d) {
  const auto unix_epoch = std::chrono::system_clock::from_time_t(0);
  return unix_epoch + d;
}
// Verifies absl::FromChrono against known-equivalent absl factory functions,
// at the epoch, +/-1 of various granularities, and +/-100 (365-day) years.
TEST(Time, FromChrono) {
  EXPECT_EQ(absl::FromTimeT(-1),
            absl::FromChrono(std::chrono::system_clock::from_time_t(-1)));
  EXPECT_EQ(absl::FromTimeT(0),
            absl::FromChrono(std::chrono::system_clock::from_time_t(0)));
  EXPECT_EQ(absl::FromTimeT(1),
            absl::FromChrono(std::chrono::system_clock::from_time_t(1)));
  EXPECT_EQ(
      absl::FromUnixMillis(-1),
      absl::FromChrono(MakeChronoUnixTime(std::chrono::milliseconds(-1))));
  EXPECT_EQ(absl::FromUnixMillis(0),
            absl::FromChrono(MakeChronoUnixTime(std::chrono::milliseconds(0))));
  EXPECT_EQ(absl::FromUnixMillis(1),
            absl::FromChrono(MakeChronoUnixTime(std::chrono::milliseconds(1))));
  // A century in seconds (365-day years), far enough out to exercise values
  // well away from the epoch in both directions.
  const auto century_sec = 60 * 60 * 24 * 365 * int64_t{100};
  const auto century = std::chrono::seconds(century_sec);
  const auto chrono_future = MakeChronoUnixTime(century);
  const auto chrono_past = MakeChronoUnixTime(-century);
  EXPECT_EQ(absl::FromUnixSeconds(century_sec),
            absl::FromChrono(chrono_future));
  EXPECT_EQ(absl::FromUnixSeconds(-century_sec), absl::FromChrono(chrono_past));
  // And the inverse direction round-trips through ToChronoTime.
  EXPECT_EQ(chrono_future,
            absl::ToChronoTime(absl::FromUnixSeconds(century_sec)));
  EXPECT_EQ(chrono_past,
            absl::ToChronoTime(absl::FromUnixSeconds(-century_sec)));
}
// Verifies absl::ToChronoTime against std::chrono values, including the
// flooring of a sub-clock-tick time just before the epoch.
TEST(Time, ToChronoTime) {
  EXPECT_EQ(std::chrono::system_clock::from_time_t(-1),
            absl::ToChronoTime(absl::FromTimeT(-1)));
  EXPECT_EQ(std::chrono::system_clock::from_time_t(0),
            absl::ToChronoTime(absl::FromTimeT(0)));
  EXPECT_EQ(std::chrono::system_clock::from_time_t(1),
            absl::ToChronoTime(absl::FromTimeT(1)));
  EXPECT_EQ(MakeChronoUnixTime(std::chrono::milliseconds(-1)),
            absl::ToChronoTime(absl::FromUnixMillis(-1)));
  EXPECT_EQ(MakeChronoUnixTime(std::chrono::milliseconds(0)),
            absl::ToChronoTime(absl::FromUnixMillis(0)));
  EXPECT_EQ(MakeChronoUnixTime(std::chrono::milliseconds(1)),
            absl::ToChronoTime(absl::FromUnixMillis(1)));
  // A quarter-nanosecond before the epoch floors to one full clock tick
  // before the epoch (round toward -infinity, not toward zero).
  const auto tick = absl::Nanoseconds(1) / 4;
  EXPECT_EQ(std::chrono::system_clock::from_time_t(0) -
                std::chrono::system_clock::duration(1),
            absl::ToChronoTime(absl::UnixEpoch() - tick));
}
// Exercises conversions at the extremes of system_clock's range, and checks
// that a 128-bit attosecond-resolution time_point can represent those
// extremes without loss.
TEST(Time, Chrono128) {
  using Timestamp =
      std::chrono::time_point<std::chrono::system_clock,
                              std::chrono::duration<absl::int128, std::atto>>;
  for (const auto tp : {std::chrono::system_clock::time_point::min(),
                        std::chrono::system_clock::time_point::max()}) {
    // Round-trip through absl::Time, and through the 128-bit Timestamp.
    EXPECT_EQ(tp, absl::ToChronoTime(absl::FromChrono(tp)));
    EXPECT_EQ(tp, std::chrono::time_point_cast<
                      std::chrono::system_clock::time_point::duration>(
                      std::chrono::time_point_cast<Timestamp::duration>(tp)));
  }
  // int64 min seconds, expressed in attoseconds: arithmetic on the 128-bit
  // rep must keep quotient/remainder exact.
  Timestamp::duration::rep v = std::numeric_limits<int64_t>::min();
  v *= Timestamp::duration::period::den;
  auto ts = Timestamp(Timestamp::duration(v));
  ts += std::chrono::duration<int64_t, std::atto>(0);
  EXPECT_EQ(std::numeric_limits<int64_t>::min(),
            ts.time_since_epoch().count() / Timestamp::duration::period::den);
  EXPECT_EQ(0,
            ts.time_since_epoch().count() % Timestamp::duration::period::den);
  // int64 max seconds plus a sub-second attosecond offset.
  v = std::numeric_limits<int64_t>::max();
  v *= Timestamp::duration::period::den;
  ts = Timestamp(Timestamp::duration(v));
  ts += std::chrono::duration<int64_t, std::atto>(999999999750000000);
  EXPECT_EQ(std::numeric_limits<int64_t>::max(),
            ts.time_since_epoch().count() / Timestamp::duration::period::den);
  EXPECT_EQ(999999999750000000,
            ts.time_since_epoch().count() % Timestamp::duration::period::den);
}
// Verifies absl::TimeZone::At() for the three kinds of civil-time lookups in
// America/New_York: UNIQUE (no transition), SKIPPED (spring-forward gap), and
// REPEATED (fall-back overlap), plus a pre-epoch civil time.
TEST(Time, TimeZoneAt) {
  const absl::TimeZone nyc =
      absl::time_internal::LoadTimeZone("America/New_York");
  const std::string fmt = "%a, %e %b %Y %H:%M:%S %z (%Z)";
  // A civil time that maps to a unique absolute time.
  absl::CivilSecond nov01(2013, 11, 1, 8, 30, 0);
  const auto nov01_ci = nyc.At(nov01);
  EXPECT_EQ(absl::TimeZone::TimeInfo::UNIQUE, nov01_ci.kind);
  EXPECT_EQ("Fri, 1 Nov 2013 08:30:00 -0400 (EDT)",
            absl::FormatTime(fmt, nov01_ci.pre, nyc));
  // For UNIQUE lookups, pre == trans == post.
  EXPECT_EQ(nov01_ci.pre, nov01_ci.trans);
  EXPECT_EQ(nov01_ci.pre, nov01_ci.post);
  EXPECT_EQ(nov01_ci.pre, absl::FromCivil(nov01, nyc));
  // A civil time that falls in the spring-forward DST gap (skipped hour).
  absl::CivilSecond mar13(2011, 3, 13, 2, 15, 0);
  const auto mar_ci = nyc.At(mar13);
  EXPECT_EQ(absl::TimeZone::TimeInfo::SKIPPED, mar_ci.kind);
  EXPECT_EQ("Sun, 13 Mar 2011 03:15:00 -0400 (EDT)",
            absl::FormatTime(fmt, mar_ci.pre, nyc));
  EXPECT_EQ("Sun, 13 Mar 2011 03:00:00 -0400 (EDT)",
            absl::FormatTime(fmt, mar_ci.trans, nyc));
  EXPECT_EQ("Sun, 13 Mar 2011 01:15:00 -0500 (EST)",
            absl::FormatTime(fmt, mar_ci.post, nyc));
  // FromCivil resolves a skipped time to the transition instant.
  EXPECT_EQ(mar_ci.trans, absl::FromCivil(mar13, nyc));
  // A civil time in the fall-back DST overlap (repeated hour).
  absl::CivilSecond nov06(2011, 11, 6, 1, 15, 0);
  const auto nov06_ci = nyc.At(nov06);
  EXPECT_EQ(absl::TimeZone::TimeInfo::REPEATED, nov06_ci.kind);
  EXPECT_EQ("Sun, 6 Nov 2011 01:15:00 -0400 (EDT)",
            absl::FormatTime(fmt, nov06_ci.pre, nyc));
  EXPECT_EQ("Sun, 6 Nov 2011 01:00:00 -0500 (EST)",
            absl::FormatTime(fmt, nov06_ci.trans, nyc));
  EXPECT_EQ("Sun, 6 Nov 2011 01:15:00 -0500 (EST)",
            absl::FormatTime(fmt, nov06_ci.post, nyc));
  // FromCivil resolves a repeated time to the earlier (pre) instant.
  EXPECT_EQ(nov06_ci.pre, absl::FromCivil(nov06, nyc));
  // One second before the Unix epoch, expressed as NYC civil time.
  absl::CivilSecond minus1(1969, 12, 31, 18, 59, 59);
  const auto minus1_cl = nyc.At(minus1);
  EXPECT_EQ(absl::TimeZone::TimeInfo::UNIQUE, minus1_cl.kind);
  EXPECT_EQ(-1, absl::ToTimeT(minus1_cl.pre));
  EXPECT_EQ("Wed, 31 Dec 1969 18:59:59 -0500 (EST)",
            absl::FormatTime(fmt, minus1_cl.pre, nyc));
  EXPECT_EQ("Wed, 31 Dec 1969 23:59:59 +0000 (UTC)",
            absl::FormatTime(fmt, minus1_cl.pre, absl::UTCTimeZone()));
}
// Verifies absl::FromCivil in UTC at the extreme representable civil years
// (values beyond the range saturate to infinite-future/infinite-past) and
// around leap-year boundaries (1900 is not a leap year; 2000 is).
TEST(Time, FromCivilUTC) {
  const absl::TimeZone utc = absl::UTCTimeZone();
  const std::string fmt = "%a, %e %b %Y %H:%M:%S %z (%Z)";
  const int kMax = std::numeric_limits<int>::max();
  const int kMin = std::numeric_limits<int>::min();
  absl::Time t;
  // Largest year whose normalized civil time still fits in absl::Time.
  t = absl::FromCivil(
      absl::CivilSecond(292091940881, kMax, kMax, kMax, kMax, kMax), utc);
  EXPECT_EQ("Fri, 25 Nov 292277026596 12:21:07 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
  // One year beyond: saturates to infinite-future.
  t = absl::FromCivil(
      absl::CivilSecond(292091940882, kMax, kMax, kMax, kMax, kMax), utc);
  EXPECT_EQ("infinite-future", absl::FormatTime(fmt, t, utc));
  // Smallest year whose normalized civil time still fits.
  t = absl::FromCivil(
      absl::CivilSecond(-292091936940, kMin, kMin, kMin, kMin, kMin), utc);
  EXPECT_EQ("Fri, 1 Nov -292277022657 10:37:52 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
  // One year beyond: saturates to infinite-past.
  t = absl::FromCivil(
      absl::CivilSecond(-292091936941, kMin, kMin, kMin, kMin, kMin), utc);
  EXPECT_EQ("infinite-past", absl::FormatTime(fmt, t, utc));
  // 1900 is not a leap year: Feb 28 is followed directly by Mar 1.
  t = absl::FromCivil(absl::CivilSecond(1900, 2, 28, 23, 59, 59), utc);
  EXPECT_EQ("Wed, 28 Feb 1900 23:59:59 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
  t = absl::FromCivil(absl::CivilSecond(1900, 3, 1, 0, 0, 0), utc);
  EXPECT_EQ("Thu, 1 Mar 1900 00:00:00 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
  // 2000 is a leap year: Feb 29 exists.
  t = absl::FromCivil(absl::CivilSecond(2000, 2, 29, 23, 59, 59), utc);
  EXPECT_EQ("Tue, 29 Feb 2000 23:59:59 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
  t = absl::FromCivil(absl::CivilSecond(2000, 3, 1, 0, 0, 0), utc);
  EXPECT_EQ("Wed, 1 Mar 2000 00:00:00 +0000 (UTC)",
            absl::FormatTime(fmt, t, utc));
}
TEST(Time, ToTM) {
const absl::TimeZone utc = absl::UTCTimeZone();
const absl::Time start =
absl::FromCivil(absl::CivilSecond(2014, 1, 2, 3, 4, 5), utc);
const absl::Time end =
absl::FromCivil(absl::CivilSecond(2014, 1, 5, 3, 4, 5), utc);
for (absl::Time t = start; t < end; t += absl::Seconds(30)) {
const struct tm tm_bt = absl::ToTM(t, utc);
const time_t tt = absl::ToTimeT(t);
struct tm tm_lc;
#ifdef _WIN32
gmtime_s(&tm_lc, &tt);
#else
gmtime_r(&tt, &tm_lc);
#endif
EXPECT_EQ(tm_lc.tm_year, tm_bt.tm_year);
EXPECT_EQ(tm_lc.tm_mon, tm_bt.tm_mon);
EXPECT_EQ(tm_lc.tm_mday, tm_bt.tm_mday);
EXPECT_EQ(tm_lc.tm_hour, tm_bt.tm_hour);
EXPECT_EQ(tm_lc.tm_min, tm_bt.tm_min);
EXPECT_EQ(tm_lc.tm_sec, tm_bt.tm_sec);
EXPECT_EQ(tm_lc.tm_wday, tm_bt.tm_wday);
EXPECT_EQ(tm_lc.tm_yday, tm_bt.tm_yday);
EXPECT_EQ(tm_lc.tm_isdst, tm_bt.tm_isdst);
ASSERT_FALSE(HasFailure());
}
const absl::TimeZone nyc =
absl::time_internal::LoadTimeZone("America/New_York");
absl::Time t = absl::FromCivil(absl::CivilSecond(2014, 3, 1, 0, 0, 0), nyc);
struct tm tm = absl::ToTM(t, nyc);
EXPECT_FALSE(tm.tm_isdst);
t = absl::FromCivil(absl::CivilSecond(2014, 4, 1, 0, 0, 0), nyc);
tm = absl::ToTM(t, nyc);
EXPECT_TRUE(tm.tm_isdst);
tm = absl::ToTM(absl::InfiniteFuture(), nyc);
EXPECT_EQ(std::numeric_limits<int>::max() - 1900, tm.tm_year);
EXPECT_EQ(11, tm.tm_mon);
EXPECT_EQ(31, tm.tm_mday);
EXPECT_EQ(23, tm.tm_hour);
EXPECT_EQ(59, tm.tm_min);
EXPECT_EQ(59, tm.tm_sec);
EXPECT_EQ(4, tm.tm_wday);
EXPECT_EQ(364, tm.tm_yday);
EXPECT_FALSE(tm.tm_isdst);
tm = absl::ToTM(absl::InfinitePast(), nyc);
EXPECT_EQ(std::numeric_limits<int>::min(), tm.tm_year);
EXPECT_EQ(0, tm.tm_mon);
EXPECT_EQ(1, tm.tm_mday);
EXPECT_EQ(0, tm.tm_hour);
EXPECT_EQ(0, tm.tm_min);
EXPECT_ | int64_t ToUnixNanos(Time t) {
if (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >= 0 &&
time_internal::GetRepHi(time_internal::ToUnixDuration(t)) >> 33 == 0) {
return (time_internal::GetRepHi(time_internal::ToUnixDuration(t)) *
1000 * 1000 * 1000) +
(time_internal::GetRepLo(time_internal::ToUnixDuration(t)) / 4);
}
return FloorToUnit(time_internal::ToUnixDuration(t), absl::Nanoseconds(1));
} | TEST(Time, FloorConversion) {
#define TEST_FLOOR_CONVERSION(TO, FROM) \
EXPECT_EQ(1, TO(FROM(1001))); \
EXPECT_EQ(1, TO(FROM(1000))); \
EXPECT_EQ(0, TO(FROM(999))); \
EXPECT_EQ(0, TO(FROM(1))); \
EXPECT_EQ(0, TO(FROM(0))); \
EXPECT_EQ(-1, TO(FROM(-1))); \
EXPECT_EQ(-1, TO(FROM(-999))); \
EXPECT_EQ(-1, TO(FROM(-1000))); \
EXPECT_EQ(-2, TO(FROM(-1001)));
TEST_FLOOR_CONVERSION(absl::ToUnixMicros, absl::FromUnixNanos);
TEST_FLOOR_CONVERSION(absl::ToUnixMillis, absl::FromUnixMicros);
TEST_FLOOR_CONVERSION(absl::ToUnixSeconds, absl::FromUnixMillis);
TEST_FLOOR_CONVERSION(absl::ToTimeT, absl::FromUnixMillis);
#undef TEST_FLOOR_CONVERSION
EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(3) / 2));
EXPECT_EQ(1, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1)));
EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::Nanoseconds(1) / 2));
EXPECT_EQ(0, absl::ToUnixNanos(absl::UnixEpoch() + absl::ZeroDuration()));
EXPECT_EQ(-1,
absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1) / 2));
EXPECT_EQ(-1, absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(1)));
EXPECT_EQ(-2,
absl::ToUnixNanos(absl::UnixEpoch() - absl::Nanoseconds(3) / 2));
EXPECT_EQ(1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(101)));
EXPECT_EQ(1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(100)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(99)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(1)));
EXPECT_EQ(0,
absl::ToUniversal(absl::UniversalEpoch() + absl::ZeroDuration()));
EXPECT_EQ(-1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-1)));
EXPECT_EQ(-1,
absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-99)));
EXPECT_EQ(
-1, absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-100)));
EXPECT_EQ(
-2, absl::ToUniversal(absl::UniversalEpoch() + absl::Nanoseconds(-101)));
const struct {
absl::Time t;
timespec ts;
} to_ts[] = {
{absl::FromUnixSeconds(1) + absl::Nanoseconds(1), {1, 1}},
{absl::FromUnixSeconds(1) + absl::Nanoseconds(1) / 2, {1, 0}},
{absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
{absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
{absl::FromUnixSeconds(0) - absl::Nanoseconds(1) / 2, {-1, 999999999}},
{absl::FromUnixSeconds(0) - absl::Nanoseconds(1), {-1, 999999999}},
{absl::FromUnixSeconds(-1) + absl::Nanoseconds(1), {-1, 1}},
{absl::FromUnixSeconds(-1) + absl::Nanoseconds(1) / 2, {-1, 0}},
{absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
{absl::FromUnixSeconds(-1) - absl::Nanoseconds(1) / 2, {-2, 999999999}},
};
for (const auto& test : to_ts) {
EXPECT_THAT(absl::ToTimespec(test.t), TimespecMatcher(test.ts));
}
const struct {
timespec ts;
absl::Time t;
} from_ts[] = {
{{1, 1}, absl::FromUnixSeconds(1) + absl::Nanoseconds(1)},
{{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
{{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
{{0, -1}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
{{-1, 999999999}, absl::FromUnixSeconds(0) - absl::Nanoseconds(1)},
{{-1, 1}, absl::FromUnixSeconds(-1) + absl::Nanoseconds(1)},
{{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
{{-1, -1}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
{{-2, 999999999}, absl::FromUnixSeconds(-1) - absl::Nanoseconds(1)},
};
for (const auto& test : from_ts) {
EXPECT_EQ(test.t, absl::TimeFromTimespec(test.ts));
}
const struct {
absl::Time t;
timeval tv;
} to_tv[] = {
{absl::FromUnixSeconds(1) + absl::Microseconds(1), {1, 1}},
{absl::FromUnixSeconds(1) + absl::Microseconds(1) / 2, {1, 0}},
{absl::FromUnixSeconds(1) + absl::ZeroDuration(), {1, 0}},
{absl::FromUnixSeconds(0) + absl::ZeroDuration(), {0, 0}},
{absl::FromUnixSeconds(0) - absl::Microseconds(1) / 2, {-1, 999999}},
{absl::FromUnixSeconds(0) - absl::Microseconds(1), {-1, 999999}},
{absl::FromUnixSeconds(-1) + absl::Microseconds(1), {-1, 1}},
{absl::FromUnixSeconds(-1) + absl::Microseconds(1) / 2, {-1, 0}},
{absl::FromUnixSeconds(-1) + absl::ZeroDuration(), {-1, 0}},
{absl::FromUnixSeconds(-1) - absl::Microseconds(1) / 2, {-2, 999999}},
};
for (const auto& test : to_tv) {
EXPECT_THAT(absl::ToTimeval(test.t), TimevalMatcher(test.tv));
}
const struct {
timeval tv;
absl::Time t;
} from_tv[] = {
{{1, 1}, absl::FromUnixSeconds(1) + absl::Microseconds(1)},
{{1, 0}, absl::FromUnixSeconds(1) + absl::ZeroDuration()},
{{0, 0}, absl::FromUnixSeconds(0) + absl::ZeroDuration()},
{{0, -1}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
{{-1, 999999}, absl::FromUnixSeconds(0) - absl::Microseconds(1)},
{{-1, 1}, absl::FromUnixSeconds(-1) + absl::Microseconds(1)},
{{-1, 0}, absl::FromUnixSeconds(-1) + absl::ZeroDuration()},
{{-1, -1}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
{{-2, 999999}, absl::FromUnixSeconds(-1) - absl::Microseconds(1)},
};
for (const auto& test : from_tv) {
EXPECT_EQ(test.t, absl::TimeFromTimeval(test.tv));
}
const int64_t min_plus_1 = std::numeric_limits<int64_t>::min() + 1;
EXPECT_EQ(min_plus_1, absl::ToUnixSeconds(absl::FromUnixSeconds(min_plus_1)));
EXPECT_EQ(std::numeric_limits<int64_t>::min(),
absl::ToUnixSeconds(absl::FromUnixSeconds(min_plus_1) -
absl::Nanoseconds(1) / 2));
EXPECT_EQ(std::numeric_limits<int64_t>::max(),
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max()) +
absl::Nanoseconds(1) / 2));
EXPECT_EQ(std::numeric_limits<int64_t>::max(),
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max())));
EXPECT_EQ(std::numeric_limits<int64_t>::max() - 1,
absl::ToUnixSeconds(
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max()) -
absl::Nanoseconds(1) / 2));
}
// Round-trip test: converting a value into absl::Time (FROM) and back out
// through the matching accessor (TO) must reproduce the original value
// exactly.  Each interchange representation is probed at -1, 0, +1 and at a
// current-time sample of the appropriate granularity.
TEST(Time, RoundtripConversion) {
#define TEST_CONVERSION_ROUND_TRIP(SOURCE, FROM, TO, MATCHER) \
  EXPECT_THAT(TO(FROM(SOURCE)), MATCHER(SOURCE))
  // Unix nanoseconds.
  int64_t now_ns = absl::GetCurrentTimeNanos();
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_ns, absl::FromUnixNanos, absl::ToUnixNanos,
                             testing::Eq)
      << now_ns;
  // Unix microseconds.
  int64_t now_us = absl::GetCurrentTimeNanos() / 1000;
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_us, absl::FromUnixMicros, absl::ToUnixMicros,
                             testing::Eq)
      << now_us;
  // Unix milliseconds.
  int64_t now_ms = absl::GetCurrentTimeNanos() / 1000000;
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_ms, absl::FromUnixMillis, absl::ToUnixMillis,
                             testing::Eq)
      << now_ms;
  // Unix seconds.
  int64_t now_s = std::time(nullptr);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_s, absl::FromUnixSeconds, absl::ToUnixSeconds,
                             testing::Eq)
      << now_s;
  // time_t.
  time_t now_time_t = std::time(nullptr);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromTimeT, absl::ToTimeT, testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_time_t, absl::FromTimeT, absl::ToTimeT,
                             testing::Eq)
      << now_time_t;
  // timeval (microsecond granularity), including negative tv_sec.
  timeval tv;
  tv.tv_sec = -1;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = -1;
  tv.tv_usec = 999999;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 0;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 0;
  tv.tv_usec = 1;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  tv.tv_sec = 1;
  tv.tv_usec = 0;
  TEST_CONVERSION_ROUND_TRIP(tv, absl::TimeFromTimeval, absl::ToTimeval,
                             TimevalMatcher);
  // timespec (nanosecond granularity), including negative tv_sec.
  timespec ts;
  ts.tv_sec = -1;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = -1;
  ts.tv_nsec = 999999999;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 0;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 0;
  ts.tv_nsec = 1;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  ts.tv_sec = 1;
  ts.tv_nsec = 0;
  TEST_CONVERSION_ROUND_TRIP(ts, absl::TimeFromTimespec, absl::ToTimespec,
                             TimespecMatcher);
  // UDate (floating-point milliseconds), including half-millisecond values
  // on both sides of zero.
  double now_ud = absl::GetCurrentTimeNanos() / 1000000;
  TEST_CONVERSION_ROUND_TRIP(-1.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(-0.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(0.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(1.5, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq);
  TEST_CONVERSION_ROUND_TRIP(now_ud, absl::FromUDate, absl::ToUDate,
                             testing::DoubleEq)
      << std::fixed << std::setprecision(17) << now_ud;
  // Universal time (100ns ticks).  719162 appears to be the day count from
  // the universal epoch to the Unix epoch -- TODO confirm against absl docs.
  int64_t now_uni = ((719162LL * (24 * 60 * 60)) * (1000 * 1000 * 10)) +
                    (absl::GetCurrentTimeNanos() / 100);
  TEST_CONVERSION_ROUND_TRIP(-1, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(0, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(1, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq);
  TEST_CONVERSION_ROUND_TRIP(now_uni, absl::FromUniversal, absl::ToUniversal,
                             testing::Eq)
      << now_uni;
#undef TEST_CONVERSION_ROUND_TRIP
}
#include "absl/base/internal/sysinfo.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/profile_utils/cpu_utils.h"
#include "tsl/platform/snappy.h"
#include "tsl/platform/types.h"
#if defined(__linux__)
#include <sched.h>
#include <sys/sysinfo.h>
#else
#include <sys/syscall.h>
#endif
#if (__x86_64__ || __i386__)
#include <cpuid.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef TF_USE_SNAPPY
#include "snappy.h"
#endif
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || \
defined(__HAIKU__)
#include <thread>
#endif
#if TENSORFLOW_USE_NUMA
#include "hwloc.h"
#endif
#if defined(__ANDROID__) && (defined(__i386__) || defined(__x86_64__))
#define TENSORFLOW_HAS_CXA_DEMANGLE 0
#elif (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \
!defined(__mips__)
#define TENSORFLOW_HAS_CXA_DEMANGLE 1
#elif defined(__clang__) && !defined(_MSC_VER)
#define TENSORFLOW_HAS_CXA_DEMANGLE 1
#else
#define TENSORFLOW_HAS_CXA_DEMANGLE 0
#endif
#if TENSORFLOW_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace tsl {
namespace port {
void InitMain(const char* usage, int* argc, char*** argv) {}
// Returns this machine's hostname as reported by gethostname(); the buffer
// is explicitly NUL-terminated because gethostname() may truncate without
// terminating.
string Hostname() {
  char name_buf[1024];
  gethostname(name_buf, sizeof(name_buf));
  name_buf[sizeof(name_buf) - 1] = '\0';
  return string(name_buf);
}
// Returns the job name from the TF_JOB_NAME environment variable, or an
// empty string when the variable is unset.
string JobName() {
  if (const char* job_name = std::getenv("TF_JOB_NAME")) {
    return string(job_name);
  }
  return "";
}
// Job UID is not available in this default implementation; always -1.
int64_t JobUid() { return -1; }
// Task id is not available in this default implementation; always -1.
int64_t TaskId() { return -1; }
// Returns the number of CPUs the current thread may be scheduled on.
// On Linux this queries the thread's affinity mask, growing the cpu_set
// allocation until the kernel accepts it; on macOS/FreeBSD/Haiku it falls
// back to hardware_concurrency(); otherwise a default of 4 is assumed.
int NumSchedulableCPUs() {
#if defined(__linux__)
  // Start with room for 1024 CPUs and double until sched_getaffinity()
  // succeeds; EINVAL means the mask was too small for this kernel.
  for (int ncpus = 1024; ncpus < std::numeric_limits<int>::max() / 2;
       ncpus *= 2) {
    size_t setsize = CPU_ALLOC_SIZE(ncpus);
    cpu_set_t* mask = CPU_ALLOC(ncpus);
    if (!mask) break;
    if (sched_getaffinity(0, setsize, mask) == 0) {
      int result = CPU_COUNT_S(setsize, mask);
      CPU_FREE(mask);
      return result;
    }
    CPU_FREE(mask);
    if (errno != EINVAL) break;
  }
  perror("sched_getaffinity");
#endif
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || \
    defined(__HAIKU__)
  // hardware_concurrency() may return 0 when the count is unknown.
  unsigned int count = std::thread::hardware_concurrency();
  if (count > 0) return static_cast<int>(count);
#endif
  const int kDefaultCores = 4;
  fprintf(stderr, "can't determine number of CPU cores: assuming %d\n",
          kDefaultCores);
  return kDefaultCores;
}
// Maximum useful parallelism: all schedulable CPUs.
int MaxParallelism() { return NumSchedulableCPUs(); }
// Maximum useful parallelism restricted to one NUMA node; assumes CPUs are
// evenly distributed across nodes.
int MaxParallelism(int numa_node) {
  if (numa_node != port::kNUMANoAffinity) {
    // Assume that CPUs are equally distributed over the available NUMA nodes.
    return NumSchedulableCPUs() / port::NUMANumNodes();
  }
  return NumSchedulableCPUs();
}
// Returns the total number of CPUs on the machine, or kUnknownCPU when the
// count cannot be determined.
int NumTotalCPUs() {
  const int cpu_count = absl::base_internal::NumCPUs();
  if (cpu_count <= 0) {
    return kUnknownCPU;
  }
  return cpu_count;
}
// Returns the CPU the calling thread is currently executing on, or
// kUnknownCPU when it cannot be determined.
int GetCurrentCPU() {
#if defined(__EMSCRIPTEN__)
  return sched_getcpu();
#elif defined(__linux__)
  return sched_getcpu();
#elif defined(__cpuid) && !defined(__APPLE__)
  // Query CPUID leaf 1.  If the APIC feature bit (EDX bit 9) is set, the
  // initial APIC ID is reported in EBX bits 31:24.
  uint32_t eax = 0;
  uint32_t ebx = 0;
  uint32_t ecx = 0;
  uint32_t edx = 0;
  __cpuid(1, eax, ebx, ecx, edx);
  if ((edx & (1 << 9)) != 0) {
    // BUG FIX: the previous expression `(ebx & 0xFF) >> 24` masked the low
    // byte before shifting, so it always evaluated to 0.  The APIC ID lives
    // in the top byte of EBX.
    return (ebx >> 24) & 0xFF;
  }
#elif defined(__NR_getcpu)
  unsigned int cpu;
  if (syscall(__NR_getcpu, &cpu, NULL, NULL) < 0) {
    return kUnknownCPU;
  } else {
    return static_cast<int>(cpu);
  }
#endif
  return kUnknownCPU;
}
// Returns the SMT (hyperthread) count per physical core as reported by
// CPUID, falling back to 1 when the query yields a non-positive value.
int NumHyperthreadsPerCore() {
  static const int smt_per_core = tsl::port::CPUIDNumSMT();
  if (smt_per_core > 0) {
    return smt_per_core;
  }
  return 1;
}
#ifdef TENSORFLOW_USE_NUMA
// NOTE(review): the hwloc.h include above is guarded with
// `#if TENSORFLOW_USE_NUMA` while this region uses `#ifdef` -- confirm the
// macro is only ever defined as non-zero, otherwise these bodies compile
// without the header.
namespace {
// Process-wide hwloc topology, initialized lazily by HaveHWLocTopology().
static hwloc_topology_t hwloc_topology_handle;
// Initializes and loads the hwloc topology exactly once; returns whether
// both steps succeeded.  Safe to call repeatedly (static lambda init).
bool HaveHWLocTopology() {
  static bool init = []() {
    if (hwloc_topology_init(&hwloc_topology_handle)) {
      LOG(ERROR) << "Call to hwloc_topology_init() failed";
      return false;
    }
    if (hwloc_topology_load(hwloc_topology_handle)) {
      LOG(ERROR) << "Call to hwloc_topology_load() failed";
      return false;
    }
    return true;
  }();
  return init;
}
// Returns the hwloc object of type `tp` whose OS index equals `index`, or
// nullptr when `index` is negative or no such object exists.
hwloc_obj_t GetHWLocTypeIndex(hwloc_obj_type_t tp, int index) {
  hwloc_obj_t obj = nullptr;
  if (index >= 0) {
    while ((obj = hwloc_get_next_obj_by_type(hwloc_topology_handle, tp, obj)) !=
           nullptr) {
      // os_index is unsigned; the index >= 0 guard above keeps this
      // signed/unsigned comparison well-defined.
      if (obj->os_index == index) break;
    }
  }
  return obj;
}
}
#endif
bool NUMAEnabled() { return (NUMANumNodes() > 1); }
// Returns the number of NUMA nodes, clamped to at least 1; without NUMA
// support (or when hwloc fails to initialize) the machine is treated as a
// single node.
int NUMANumNodes() {
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology()) {
    int num_numanodes =
        hwloc_get_nbobjs_by_type(hwloc_topology_handle, HWLOC_OBJ_NUMANODE);
    return std::max(1, num_numanodes);
  } else {
    return 1;
  }
#else
  return 1;
#endif
}
// Binds the calling thread to the CPUs of NUMA node `node` (strictly, via
// hwloc).  A no-op without NUMA support; logs an error when the node does
// not exist.
void NUMASetThreadNodeAffinity(int node) {
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology()) {
    // Find the corresponding NUMA node topology object.
    hwloc_obj_t obj = GetHWLocTypeIndex(HWLOC_OBJ_NUMANODE, node);
    if (obj) {
      hwloc_set_cpubind(hwloc_topology_handle, obj->cpuset,
                        HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
    } else {
      LOG(ERROR) << "Could not find hwloc NUMA node " << node;
    }
  }
#endif
}
// Returns the NUMA node whose cpuset fully contains the calling thread's
// CPU binding, or kNUMANoAffinity when unbound / NUMA is unavailable.
int NUMAGetThreadNodeAffinity() {
  int node_index = kNUMANoAffinity;
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology()) {
    hwloc_cpuset_t thread_cpuset = hwloc_bitmap_alloc();
    hwloc_get_cpubind(hwloc_topology_handle, thread_cpuset,
                      HWLOC_CPUBIND_THREAD);
    hwloc_obj_t obj = nullptr;
    // Return the first NUMA node whose cpuset covers the thread's binding.
    while ((obj = hwloc_get_next_obj_by_type(
                hwloc_topology_handle, HWLOC_OBJ_NUMANODE, obj)) != nullptr) {
      if (hwloc_bitmap_isincluded(thread_cpuset, obj->cpuset)) {
        node_index = obj->os_index;
        break;
      }
    }
    hwloc_bitmap_free(thread_cpuset);
  }
#endif
  return node_index;
}
// Allocates `size` bytes bound to NUMA node `node` via hwloc when possible;
// otherwise (no NUMA support, hwloc failure, or unknown node) falls back to
// an ordinary aligned allocation.  Memory from the hwloc path must be
// released with NUMAFree.
void* NUMAMalloc(int node, size_t size, int minimum_alignment) {
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology()) {
    hwloc_obj_t numa_node = GetHWLocTypeIndex(HWLOC_OBJ_NUMANODE, node);
    if (numa_node) {
      // NOTE(review): hwloc_alloc_membind makes no minimum_alignment
      // guarantee beyond page alignment -- presumably sufficient here.
      return hwloc_alloc_membind(hwloc_topology_handle, size,
                                 numa_node->nodeset, HWLOC_MEMBIND_BIND,
                                 HWLOC_MEMBIND_BYNODESET);
    } else {
      LOG(ERROR) << "Failed to find hwloc NUMA node " << node;
    }
  }
#endif
  return tsl::port::AlignedMalloc(size, minimum_alignment);
}
// Releases memory obtained from NUMAMalloc.  When hwloc is active the
// original allocation size must be supplied; otherwise falls through to the
// ordinary allocator.
void NUMAFree(void* ptr, size_t size) {
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology()) {
    hwloc_free(hwloc_topology_handle, ptr, size);
    return;
  }
#endif
  tsl::port::Free(ptr);
}
// Returns the NUMA node on which the memory at `addr` resides, or
// kNUMANoAffinity when addr is null, NUMA is unavailable, or the query fails.
int NUMAGetMemAffinity(const void* addr) {
  int node = kNUMANoAffinity;
#ifdef TENSORFLOW_USE_NUMA
  if (HaveHWLocTopology() && addr) {
    hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
    // Query the location of a 4-byte region starting at addr;
    // hwloc_get_area_memlocation returns 0 on success.
    if (!hwloc_get_area_memlocation(hwloc_topology_handle, addr, 4, nodeset,
                                    HWLOC_MEMBIND_BYNODESET)) {
      hwloc_obj_t obj = nullptr;
      // Report the first NUMA node contained in the returned nodeset.
      while ((obj = hwloc_get_next_obj_by_type(
                  hwloc_topology_handle, HWLOC_OBJ_NUMANODE, obj)) != nullptr) {
        if (hwloc_bitmap_isincluded(nodeset, obj->nodeset)) {
          node = obj->os_index;
          break;
        }
      }
      hwloc_bitmap_free(nodeset);
    } else {
      LOG(ERROR) << "Failed call to hwloc_get_area_memlocation.";
    }
  }
#endif
  return node;
}
// Compresses `input[0, length)` into `output` with Snappy.  Returns true on
// success, or false when built without Snappy support.
bool Snappy_Compress(const char* input, size_t length, string* output) {
#ifdef TF_USE_SNAPPY
  // Reserve the worst-case size, then shrink to the actual compressed size.
  output->resize(snappy::MaxCompressedLength(length));
  size_t outlen;
  snappy::RawCompress(input, length, &(*output)[0], &outlen);
  output->resize(outlen);
  return true;
#else
  return false;
#endif
}
// Compresses the scattered input described by `iov` (totaling
// `uncompressed_length` bytes) into `output` with Snappy.  Returns true on
// success, or false when built without Snappy support.
bool Snappy_CompressFromIOVec(const struct iovec* iov,
                              size_t uncompressed_length, string* output) {
#ifdef TF_USE_SNAPPY
  // Reserve the worst-case size, then shrink to the actual compressed size.
  output->resize(snappy::MaxCompressedLength(uncompressed_length));
  size_t outlen;
  snappy::RawCompressFromIOVec(iov, uncompressed_length, &(*output)[0],
                               &outlen);
  output->resize(outlen);
  return true;
#else
  return false;
#endif
}
// Stores the uncompressed length of Snappy data `input[0, length)` in
// *result.  Returns false on malformed input or when built without Snappy.
bool Snappy_GetUncompressedLength(const char* input, size_t length,
                                  size_t* result) {
#ifdef TF_USE_SNAPPY
  return snappy::GetUncompressedLength(input, length, result);
#else
  return false;
#endif
}
// Decompresses `length` snappy-compressed bytes from `input` into `output`.
// Caller must size `output` using Snappy_GetUncompressedLength beforehand.
// Returns false when snappy support was compiled out or decompression fails.
bool Snappy_Uncompress(const char* input, size_t length, char* output) {
#ifdef TF_USE_SNAPPY
  return snappy::RawUncompress(input, length, output);
#else
  return false;
#endif
}
// Decompresses `compressed_length` snappy-compressed bytes into the scattered
// buffers described by `iov` (`iov_cnt` entries). Returns false when snappy
// support was compiled out or decompression fails.
bool Snappy_UncompressToIOVec(const char* compressed, size_t compressed_length,
                              const struct iovec* iov, size_t iov_cnt) {
#ifdef TF_USE_SNAPPY
  return snappy::RawUncompressToIOVec(compressed, compressed_length, iov,
                                      iov_cnt);
#else
  return false;
#endif
}
// Appends the demangled form of `mangled` to `*out`; falls back to appending
// the mangled name verbatim when __cxa_demangle is unavailable or fails.
static void DemangleToString(const char* mangled, string* out) {
  char* result = nullptr;
  int status = 0;
#if TENSORFLOW_HAS_CXA_DEMANGLE
  result = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
#endif
  if (result == nullptr || status != 0) {
    out->append(mangled);
  } else {
    out->append(result);
    // __cxa_demangle allocates with malloc; we own the buffer.
    free(result);
  }
}
// Returns the demangled form of `mangled` (or the input unchanged when
// demangling is unavailable).
string Demangle(const char* mangled) {
  string result;
  DemangleToString(mangled, &result);
  return result;
}
// Returns the nominal CPU cycle-counter frequency in Hz, as estimated by
// tsl's CpuUtils.
double NominalCPUFrequency() {
  return tsl::profile_utils::CpuUtils::GetCycleCounterFrequency();
}
}
}
namespace tsl {
namespace port {
// Allocates `size` bytes aligned to `minimum_alignment`. Returns nullptr on
// failure. Alignments below sizeof(void*) are served by plain Malloc, since
// posix_memalign rejects them.
void* AlignedMalloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__)
  return memalign(minimum_alignment, size);
#else
  if (minimum_alignment < static_cast<int>(sizeof(void*))) {
    return Malloc(size);
  }
  void* ptr = nullptr;
  const int err = posix_memalign(&ptr, minimum_alignment, size);
  return err == 0 ? ptr : nullptr;
#endif
}
// Releases memory obtained from AlignedMalloc; both the memalign and
// posix_memalign paths yield pointers compatible with ordinary Free.
void AlignedFree(void* aligned_memory) {
  Free(aligned_memory);
}
// Thin wrapper over the C allocator; returns nullptr on failure.
void* Malloc(size_t size) {
  return malloc(size);
}
// Thin wrapper over C realloc; with ptr == nullptr it behaves like Malloc.
void* Realloc(void* ptr, size_t size) {
  return realloc(ptr, size);
}
// Releases memory obtained from Malloc/Realloc; safe to call with nullptr.
void Free(void* ptr) {
  free(ptr);
}
// No-op: the default allocator exposes no way to return memory to the OS.
void MallocExtension_ReleaseToSystem(std::size_t num_bytes) {
}
// Returns the usable size of the allocation at `p` where the platform can
// report it (Android's malloc_usable_size); 0 means "unknown" elsewhere.
std::size_t MallocExtension_GetAllocatedSize(const void* p) {
#if defined(__ANDROID__)
  return malloc_usable_size(p);
#else
  return 0;
#endif
}
// Reports free/total physical memory in bytes; INT64_MAX fields mean
// "unknown" (non-Linux platforms, or a failed sysinfo call).
MemoryInfo GetMemoryInfo() {
  MemoryInfo mem_info = {INT64_MAX, INT64_MAX};
#if defined(__linux__)
  struct sysinfo info;
  int err = sysinfo(&info);
  if (err == 0) {
    // sysinfo(2) reports freeram/totalram in units of `mem_unit` bytes, so
    // scale to bytes; the previous code returned raw units, which
    // under-reports on kernels where mem_unit != 1.
    mem_info.free = static_cast<int64_t>(info.freeram) * info.mem_unit;
    mem_info.total = static_cast<int64_t>(info.totalram) * info.mem_unit;
  }
#endif
  return mem_info;
}
// Memory bandwidth is not measured on this platform; INT64_MAX is the
// "unknown" sentinel.
MemoryBandwidthInfo GetMemoryBandwidthInfo() {
  MemoryBandwidthInfo info = {INT64_MAX};
  return info;
}
// No I/O statistics are collected on this platform; returns a
// value-initialized (all-default) struct.
IOStatistics GetIOStatistics() { return {}; }
}
} | #include <condition_variable>
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace port {
// Verifies AlignedMalloc honors every power-of-two alignment up to 1 MiB and
// that the result can be released with AlignedFree.
TEST(Port, AlignedMalloc) {
  for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) {
    void* p = AlignedMalloc(1, alignment);
    ASSERT_TRUE(p != nullptr) << "AlignedMalloc(1, " << alignment << ")";
    uintptr_t pval = reinterpret_cast<uintptr_t>(p);
    EXPECT_EQ(pval % alignment, 0);
    AlignedFree(p);
  }
}
// Checks that GetCurrentCPU returns an index within [0, NumTotalCPUs).
// The range check is skipped on macOS.
TEST(Port, GetCurrentCPU) {
  const int cpu = GetCurrentCPU();
#if !defined(__APPLE__)
  EXPECT_GE(cpu, 0);
  EXPECT_LT(cpu, NumTotalCPUs());
#endif
}
// Nobody notifies the condition variable, so the wait must eventually report
// a timeout (~3s); the loop tolerates spurious wakeups.
TEST(ConditionVariable, WaitForMilliseconds_Timeout) {
  mutex m;
  mutex_lock l(m);
  condition_variable cv;
  ConditionResult result = tsl::kCond_MaybeNotified;
  time_t start = time(nullptr);
  while (result == tsl::kCond_MaybeNotified) {
    result = WaitForMilliseconds(&l, &cv, 3000);
  }
  EXPECT_EQ(result, tsl::kCond_Timeout);
  time_t finish = time(nullptr);
  EXPECT_GE(finish - start, 3);
}
// A background thread notifies after ~1s, so the 3s wait returns early with
// kCond_MaybeNotified (i.e. not a timeout).
TEST(ConditionVariable, WaitForMilliseconds_Signalled) {
  thread::ThreadPool pool(Env::Default(), "test", 1);
  mutex m;
  mutex_lock l(m);
  condition_variable cv;
  time_t start = time(nullptr);
  pool.Schedule([&m, &cv]() {
    Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
    mutex_lock l(m);
    cv.notify_all();
  });
  EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), tsl::kCond_MaybeNotified);
  time_t finish = time(nullptr);
  EXPECT_LT(finish - start, 3);
}
// The condition can never become true, so AwaitWithDeadline must return
// false only after the full ~3s deadline has elapsed.
TEST(ConditionalCriticalSections, AwaitWithDeadline_Timeout) {
  bool always_false = false;
  mutex m;
  m.lock();
  time_t start = time(nullptr);
  bool result =
      m.AwaitWithDeadline(Condition(&always_false),
                          EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
  time_t finish = time(nullptr);
  m.unlock();
  EXPECT_EQ(result, false);
  EXPECT_GE(finish - start, 3);
}
// A background thread flips the flag after ~1s, so AwaitWithDeadline returns
// true well before the 3s deadline.
TEST(ConditionalCriticalSections, AwaitWithDeadline_Woken) {
  thread::ThreadPool pool(Env::Default(), "test", 1);
  bool woken = false;
  mutex m;
  m.lock();
  time_t start = time(nullptr);
  pool.Schedule([&m, &woken]() {
    Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
    m.lock();
    woken = true;
    m.unlock();
  });
  bool result = m.AwaitWithDeadline(
      Condition(&woken), EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
  time_t finish = time(nullptr);
  m.unlock();
  EXPECT_EQ(result, true);
  EXPECT_LT(finish - start, 3);
}
// Free-function predicate for mutex::Await: true when *b is false.
static bool Invert(bool* b) { return *b == false; }
// Method-pointer predicate for mutex::Await: Value() is true when the watched
// flag is false.
class InvertClass {
 public:
  explicit InvertClass(bool* value) : value_(value) {}
  // Returns the logical negation of the watched flag.
  bool Value() { return *value_ ? false : true; }

 private:
  InvertClass();  // not default-constructible
  bool* value_;
};
// Two threads alternate ownership of `ping_pong` 1000 times; the main thread
// waits with a free-function condition (Invert). Completion proves Await
// wakes reliably on both sides.
TEST(ConditionalCriticalSections, Await_PingPong) {
  thread::ThreadPool pool(Env::Default(), "test", 1);
  bool ping_pong = false;
  bool done = false;
  mutex m;
  pool.Schedule([&m, &ping_pong, &done]() {
    m.lock();
    for (int i = 0; i != 1000; i++) {
      m.Await(Condition(&ping_pong));
      ping_pong = false;
    }
    done = true;
    m.unlock();
  });
  m.lock();
  InvertClass invert(&ping_pong);
  for (int i = 0; i != 1000; i++) {
    m.Await(Condition(&Invert, &ping_pong));
    ping_pong = true;
  }
  m.Await(Condition(&done));
  m.unlock();
}
// Same ping-pong protocol, but the main thread's condition is expressed as a
// member-function pointer (InvertClass::Value).
TEST(ConditionalCriticalSections, Await_PingPongMethod) {
  thread::ThreadPool pool(Env::Default(), "test", 1);
  bool ping_pong = false;
  bool done = false;
  mutex m;
  pool.Schedule([&m, &ping_pong, &done]() {
    m.lock();
    for (int i = 0; i != 1000; i++) {
      m.Await(Condition(&ping_pong));
      ping_pong = false;
    }
    done = true;
    m.unlock();
  });
  m.lock();
  InvertClass invert(&ping_pong);
  for (int i = 0; i != 1000; i++) {
    m.Await(Condition(&invert, &InvertClass::Value));
    ping_pong = true;
  }
  m.Await(Condition(&done));
  m.unlock();
}
// Smoke test: TestCPUFeature must be callable for AVX/AVX2; the results are
// hardware-dependent, so they are only logged, not asserted.
TEST(TestCPUFeature, TestFeature) {
  const bool has_avx = TestCPUFeature(CPUFeature::AVX);
  LOG(INFO) << "has_avx = " << has_avx;
  const bool has_avx2 = TestCPUFeature(CPUFeature::AVX2);
  LOG(INFO) << "has_avx2 = " << has_avx2;
}
}
} | namespace tsl {
namespace port {
void* AlignedMalloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__)
return memalign(minimum_alignment, size);
#else
void* ptr = nullptr;
const int required_alignment = sizeof(void*);
if (minimum_alignment < required_alignment) return Malloc(size);
int err = posix_memalign(&ptr, minimum_alignment, size);
if (err != 0) {
return nullptr;
} else {
return ptr;
}
#endif
} | #include <condition_variable>
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace port {
TEST(Port, AlignedMalloc) {
for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) {
void* p = AlignedMalloc(1, alignment);
ASSERT_TRUE(p != nullptr) << "AlignedMalloc(1, " << alignment << ")";
uintptr_t pval = reinterpret_cast<uintptr_t>(p);
EXPECT_EQ(pval % alignment, 0);
AlignedFree(p);
}
} |
#ifndef AROLLA_IO_ACCESSORS_INPUT_LOADER_H_
#define AROLLA_IO_ACCESSORS_INPUT_LOADER_H_
#include <cstddef>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/io/accessor_helpers.h"
#include "arolla/io/input_loader.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/meta.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
// Invokes `accessor` on `input` and stores the result in `*output`,
// dispatching on the accessor's signature. Supported signatures, tried in
// this order:
//   void(const Input&, RawBufferFactory*, Output*)
//   void(const Input&, Output*)
//   Output(const Input&, RawBufferFactory*)
//   Output(const Input&)
// Accessors that do not accept a RawBufferFactory* simply ignore `factory`.
template <class Accessor, class Input, class Output>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InvokeInputLoaderAccessor(
    const Accessor& accessor, const Input& input, RawBufferFactory* factory,
    Output* output) {
  if constexpr (std::is_invocable_v<const Accessor&, const Input&,
                                    RawBufferFactory*, Output*>) {
    accessor(input, factory, output);
  } else if constexpr (std::is_invocable_v<const Accessor&, const Input&,
                                           Output*>) {
    ((void)(factory));  // unused in this overload
    accessor(input, output);
  } else if constexpr (std::is_invocable_v<const Accessor&, const Input&,
                                           RawBufferFactory*>) {
    *output = accessor(input, factory);
  } else if constexpr (std::is_invocable_v<const Accessor&, const Input&>) {
    ((void)(factory));  // unused in this overload
    *output = accessor(input);
  }
}
namespace input_loader_impl {
// Computes the accessor's result type without invoking it. Value-returning
// signatures are deduced via std::invoke_result; output-parameter signatures
// extract the output type from the accessor's last argument (pointer
// stripped), using meta::function_traits.
template <class Accessor, class Input>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto InvokeInputLoaderAccessorTypeMeta() {
  if constexpr (std::is_invocable_v<const Accessor&, const Input&,
                                    RawBufferFactory*>) {
    return std::invoke_result<const Accessor&, const Input&,
                              RawBufferFactory*>();
  } else if constexpr (std::is_invocable_v<const Accessor&, const Input&>) {
    return std::invoke_result<const Accessor&, const Input&>();
  } else {
    using info = meta::function_traits<std::decay_t<Accessor>>;
    if constexpr (info::arity == 2) {
      // void(const Input&, Output*): Output is the second argument.
      using Output = std::remove_pointer_t<
          std::tuple_element_t<1, typename info::arg_types::tuple>>;
      static_assert(std::is_invocable_v<const Accessor&, const Input&, Output*>,
                    "Unexpected accessor signature.");
      return meta::type<Output>();
    } else {
      // void(const Input&, RawBufferFactory*, Output*): Output is the third.
      using Output = std::remove_pointer_t<
          std::tuple_element_t<2, typename info::arg_types::tuple>>;
      static_assert(std::is_invocable_v<const Accessor&, const Input&,
                                        RawBufferFactory*, Output*>,
                    "Unexpected accessor signature.");
      return meta::type<Output>();
    }
  }
}
}
// Decayed result type produced by `Accessor` when applied to `Input`; this is
// the value type stored in the output slot.
template <class Accessor, class Input>
using InputLoaderAccessorResultType = std::decay_t<
    typename decltype(input_loader_impl::InvokeInputLoaderAccessorTypeMeta<
                      const Accessor&, const Input&>())::type>;
namespace input_loader_impl {
template <class Input, class NameAccessorsTuple>
class AccessorsInputLoader;
// Writes one accessor's result into its (optional) output slot in a frame.
// When the slot is absent — the corresponding input was not requested at bind
// time — operator() is a no-op and the accessor is never invoked.
template <class Input, class Accessor>
class Setter {
 public:
  using ResultType = InputLoaderAccessorResultType<Accessor, Input>;
  Setter(std::optional<FrameLayout::Slot<ResultType>> slot, Accessor accessor)
      : slot_(slot), accessor_(std::move(accessor)) {}
  // Converts `slot` (if present) to a typed slot of the accessor's result
  // type; fails when the requested slot has a different type.
  static absl::StatusOr<Setter> Build(std::optional<TypedSlot> slot,
                                      const Accessor& accessor) {
    if (slot.has_value()) {
      ASSIGN_OR_RETURN(auto specific_slot, slot->ToSlot<ResultType>());
      return {Setter({specific_slot}, accessor)};
    } else {
      return {Setter(std::nullopt, accessor)};
    }
  }
  // Evaluates the accessor on `input` and stores the result in the bound
  // slot of `frame` (skipped when no slot is bound).
  void operator()(const Input& input, FramePtr frame,
                  RawBufferFactory* factory) const {
    if (slot_.has_value()) {
      InvokeInputLoaderAccessor(accessor_, input, factory,
                                frame.GetMutable(*slot_));
    }
  }
 private:
  std::optional<FrameLayout::Slot<ResultType>> slot_;
  Accessor accessor_;
};
// InputLoader built from a compile-time tuple of (name, accessor) pairs.
// Build() validates name uniqueness; BindImpl matches requested output slots
// to accessors by name and produces a BoundInputLoader that runs every bound
// accessor in tuple order.
template <class Input, class... Accessors>
class AccessorsInputLoader<Input,
                           std::tuple<std::pair<std::string, Accessors>...>>
    final : public StaticInputLoader<Input> {
  using NameAccessorsTuple = std::tuple<std::pair<std::string, Accessors>...>;
 public:
  // Creates the loader; fails if two accessors share a name.
  static absl::StatusOr<InputLoaderPtr<Input>> Build(
      NameAccessorsTuple accessors) {
    auto output_types_in_order = CreateOutputTypesInOrder(
        accessors,
        std::make_index_sequence<std::tuple_size<NameAccessorsTuple>::value>{});
    RETURN_IF_ERROR(ValidateDuplicatedNames(output_types_in_order));
    return InputLoaderPtr<Input>(
        static_cast<InputLoader<Input>*>(new AccessorsInputLoader(
            std::move(accessors), std::move(output_types_in_order))));
  }
  // Binds the requested `output_slots` (a subset of the declared names) after
  // verifying their types against the accessors' result types.
  absl::StatusOr<BoundInputLoader<Input>> BindImpl(
      const absl::flat_hash_map<std::string, TypedSlot>& output_slots)
      const final {
    ASSIGN_OR_RETURN(auto slots, MaybeFindSlotsAndVerifyTypes(
                                     this->types_in_order(), output_slots));
    return BindImpl(
        std::move(slots),
        std::make_index_sequence<std::tuple_size<NameAccessorsTuple>::value>{});
  }
 private:
  explicit AccessorsInputLoader(
      NameAccessorsTuple accessors,
      std::vector<std::pair<std::string, QTypePtr>> output_types_in_order)
      : StaticInputLoader<Input>(std::move(output_types_in_order)),
        accessors_(std::move(accessors)) {}
  // Accessor type stored at tuple position I.
  template <size_t I>
  using Accessor =
      std::tuple_element_t<1, std::tuple_element_t<I, NameAccessorsTuple>>;
  // Result type of the I-th accessor.
  template <size_t I>
  using InputLoaderAccessorResultType =
      InputLoaderAccessorResultType<Accessor<I>, Input>;
  template <size_t I>
  const Accessor<I>& GetAccessor() const {
    return std::get<1>(std::get<I>(accessors_));
  }
  // QType of the I-th accessor's result.
  template <size_t I>
  static QTypePtr GetOutputType() {
    return GetQType<InputLoaderAccessorResultType<I>>();
  }
  // Materializes the (name, qtype) list in declaration order.
  template <size_t... Is>
  static std::vector<std::pair<std::string, QTypePtr>> CreateOutputTypesInOrder(
      const NameAccessorsTuple& accessors, std::index_sequence<Is...>) {
    return {{std::string(std::get<0>(std::get<Is>(accessors))),
             GetOutputType<Is>()}...};
  }
  // Builds one Setter per accessor (inactive when its slot is absent) and
  // captures them in the returned bound-loader closure.
  template <size_t... Is>
  absl::StatusOr<BoundInputLoader<Input>> BindImpl(
      std::vector<std::optional<TypedSlot>> slots,
      std::index_sequence<Is...>) const {
    auto setters_or = LiftStatusUp(
        Setter<Input, Accessor<Is>>::Build(slots[Is], GetAccessor<Is>())...);
    ASSIGN_OR_RETURN(auto setters, setters_or);
    return BoundInputLoader<Input>(
        [setters_(std::move(setters))](
            const Input& input ABSL_ATTRIBUTE_UNUSED,
            FramePtr frame ABSL_ATTRIBUTE_UNUSED,
            RawBufferFactory* factory ABSL_ATTRIBUTE_UNUSED) {
          (std::get<Is>(setters_)(input, frame, factory), ...);
          return absl::OkStatus();
        });
  }
  NameAccessorsTuple accessors_;
};
}
// Public alias for the implementation class above.
template <class Input, class NameAccessorsTuple>
using AccessorsInputLoader =
    input_loader_impl::AccessorsInputLoader<Input, NameAccessorsTuple>;
// Builds an AccessorsInputLoader from an already-constructed tuple of
// (name, accessor) pairs. Fails on duplicated names.
template <class Input, class... Accessors>
absl::StatusOr<InputLoaderPtr<Input>> CreateAccessorsInputLoaderFromTuple(
    std::tuple<std::pair<std::string, Accessors>...> name_accessors) {
  using Tuple = std::tuple<std::pair<std::string, Accessors>...>;
  return AccessorsInputLoader<Input, Tuple>::Build(std::move(name_accessors));
}
// Convenience overload: accepts an interleaved pack "name1", accessor1,
// "name2", accessor2, ... and forwards it as a tuple of (name, accessor)
// pairs.
template <class Input, class... NameAccessors>
absl::StatusOr<InputLoaderPtr<Input>> CreateAccessorsInputLoader(
    NameAccessors... name_accessors) {
  auto pairs = accessor_helpers_impl::ConvertNameAccessorsPackToNestedTuple(
      name_accessors...);
  return CreateAccessorsInputLoaderFromTuple<Input>(std::move(pairs));
}
}
#endif | #include "arolla/io/accessors_input_loader.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::InputLoaderSupports;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Input type and sample accessors covering all four supported accessor
// signatures (value-return, output-arg, with/without RawBufferFactory).
struct TestStruct {
  int a;
  double b;
};
struct GetAConstRef {
  const int& operator()(const TestStruct& s) const { return s.a; }
};
struct GetBValue {
  double operator()(const TestStruct& s) const { return s.b; }
};
struct GetBValueViaOutputArg {
  void operator()(const TestStruct& s, double* res) const { *res = s.b; }
};
struct GetBValueViaOutputArgWithRawBufferFactory {
  void operator()(const TestStruct& s, RawBufferFactory*, double* res) const {
    *res = s.b;
  }
};
// Compile-time check that the result-type metafunction deduces the decayed
// output type for each accessor signature.
TEST(InputLoaderTest, InputLoaderAccessorResultType) {
  static_assert(
      std::is_same_v<InputLoaderAccessorResultType<GetAConstRef, TestStruct>,
                     int>);
  static_assert(
      std::is_same_v<InputLoaderAccessorResultType<GetBValue, TestStruct>,
                     double>);
  static_assert(
      std::is_same_v<
          InputLoaderAccessorResultType<GetBValueViaOutputArg, TestStruct>,
          double>);
  static_assert(
      std::is_same_v<InputLoaderAccessorResultType<
                         GetBValueViaOutputArgWithRawBufferFactory, TestStruct>,
                     double>);
}
// End-to-end test: builds a loader over all four accessor kinds, then binds
// (1) all outputs, (2) only "a", (3) only "b", verifying loaded values each
// time. Extra unrelated slots in the layouts check that offsets are honored.
TEST(InputLoaderTest, AccessorsInputLoader) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  auto accessors_tuple = std::make_tuple(
      std::make_pair(std::string("a"), GetAConstRef{}),
      std::make_pair(std::string("b"), GetBValue{}),
      std::make_pair(std::string("b2"), GetBValueViaOutputArg{}),
      std::make_pair(std::string("b3"),
                     GetBValueViaOutputArgWithRawBufferFactory{}));
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      (AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>::Build(
          accessors_tuple)));
  EXPECT_THAT(
      input_loader,
      InputLoaderSupports({{"a", i32}, {"b", f64}, {"b2", f64}, {"b3", f64}}));
  // Full bind: every declared output gets a slot.
  {
    FrameLayout::Builder layout_builder;
    layout_builder.AddSlot<double>();
    auto a_slot = layout_builder.AddSlot<int>();
    layout_builder.AddSlot<char>();
    auto b_slot = layout_builder.AddSlot<double>();
    layout_builder.AddSlot<std::string>();
    auto b2_slot = layout_builder.AddSlot<double>();
    layout_builder.AddSlot<int16_t>();
    auto b3_slot = layout_builder.AddSlot<double>();
    ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                         input_loader->Bind({
                             {"a", TypedSlot::FromSlot(a_slot)},
                             {"b", TypedSlot::FromSlot(b_slot)},
                             {"b2", TypedSlot::FromSlot(b2_slot)},
                             {"b3", TypedSlot::FromSlot(b3_slot)},
                         }));
    FrameLayout memory_layout = std::move(layout_builder).Build();
    MemoryAllocation alloc(&memory_layout);
    ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
    EXPECT_EQ(alloc.frame().Get(a_slot), 5);
    EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
    EXPECT_EQ(alloc.frame().Get(b2_slot), 3.5);
    EXPECT_EQ(alloc.frame().Get(b3_slot), 3.5);
  }
  // Subset bind: only "a" requested.
  {
    FrameLayout::Builder layout_builder;
    layout_builder.AddSlot<std::string>();
    auto a_slot = layout_builder.AddSlot<int>();
    layout_builder.AddSlot<char>();
    ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                         input_loader->Bind({
                             {"a", TypedSlot::FromSlot(a_slot)},
                         }));
    FrameLayout memory_layout = std::move(layout_builder).Build();
    MemoryAllocation alloc(&memory_layout);
    ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
    EXPECT_EQ(alloc.frame().Get(a_slot), 5);
  }
  // Subset bind: only "b" requested.
  {
    FrameLayout::Builder layout_builder;
    layout_builder.AddSlot<std::string>();
    auto b_slot = layout_builder.AddSlot<double>();
    layout_builder.AddSlot<char>();
    ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                         input_loader->Bind({
                             {"b", TypedSlot::FromSlot(b_slot)},
                         }));
    FrameLayout memory_layout = std::move(layout_builder).Build();
    MemoryAllocation alloc(&memory_layout);
    ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
    EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
  }
}
// Accessors with multiple call operators; the distinct result values (1-4)
// let the next test detect which overload the dispatcher picked.
struct GetWithFactoryWithHelper {
  void operator()(const TestStruct& s, RawBufferFactory*, double* res) const {
    *res = 1;
  }
  double operator()(const TestStruct& s, RawBufferFactory*) const { return 3; }
};
struct GetWithoutFactoryWithHelper {
  void operator()(const TestStruct& s, double* res) const { *res = 2; }
  double operator()(const TestStruct& s) const { return 4; }
};
// Provides all four supported signatures at once.
struct GetWithAllVariantsHelper {
  void operator()(const TestStruct& s, RawBufferFactory*, double* res) const {
    *res = 1;
  }
  double operator()(const TestStruct& s, RawBufferFactory*) const { return 3; }
  void operator()(const TestStruct& s, double* res) const { *res = 2; }
  double operator()(const TestStruct& s) const { return 4; }
};
// Verifies the dispatch priority among accessor overloads: output-arg forms
// win over value-return forms, and factory-taking forms win overall (the
// loaded values 1/2/1 correspond to the expected overload choices).
TEST(InputLoaderTest, AccessorsInputLoaderChooseRightSignature) {
  auto f64 = GetQType<double>();
  auto accessors_tuple = std::make_tuple(
      std::make_pair(std::string("a"), GetWithFactoryWithHelper{}),
      std::make_pair(std::string("b"), GetWithoutFactoryWithHelper{}),
      std::make_pair(std::string("c"), GetWithAllVariantsHelper{}));
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      (AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>::Build(
          accessors_tuple)));
  EXPECT_THAT(input_loader,
              InputLoaderSupports({{"a", f64}, {"b", f64}, {"c", f64}}));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<double>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                           {"b", TypedSlot::FromSlot(b_slot)},
                           {"c", TypedSlot::FromSlot(c_slot)},
                       }));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({0, 0}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 1);
  EXPECT_EQ(alloc.frame().Get(b_slot), 2);
  EXPECT_EQ(alloc.frame().Get(c_slot), 1);
}
// PartialBind consumes the slots it recognizes and leaves unknown ones in the
// map untouched, whereas Bind fails on any unknown input. Unbound slots keep
// their zero-initialized values after loading.
TEST(InputLoaderTest, AccessorsInputLoaderPartialBind) {
  ASSERT_OK_AND_ASSIGN(auto input_loader,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", GetAConstRef{}, "b", GetBValue{}));
  // One known slot ("b") plus one unknown slot.
  {
    FrameLayout::Builder layout_builder;
    auto ignored_slot = layout_builder.AddSlot<int>();
    auto b_slot = layout_builder.AddSlot<double>();
    layout_builder.AddSlot<char>();
    absl::flat_hash_map<std::string, TypedSlot> slots = {
        {"b", TypedSlot::FromSlot(b_slot)},
        {"ignored", TypedSlot::FromSlot(ignored_slot)}};
    EXPECT_THAT(input_loader->Bind(slots),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         HasSubstr("unknown inputs: ignored")));
    ASSERT_OK_AND_ASSIGN(auto bound_input_loader,
                         input_loader->PartialBind(&slots));
    EXPECT_THAT(slots, UnorderedElementsAre(
                           Pair("ignored", TypedSlot::FromSlot(ignored_slot))));
    FrameLayout memory_layout = std::move(layout_builder).Build();
    MemoryAllocation alloc(&memory_layout);
    ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
    EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
    EXPECT_EQ(alloc.frame().Get(ignored_slot), 0);
  }
  // Both known slots plus two unknown slots.
  {
    FrameLayout::Builder layout_builder;
    auto ignored_a_slot = layout_builder.AddSlot<int>();
    auto ignored_b_slot = layout_builder.AddSlot<int>();
    auto a_slot = layout_builder.AddSlot<int>();
    auto b_slot = layout_builder.AddSlot<double>();
    layout_builder.AddSlot<char>();
    absl::flat_hash_map<std::string, TypedSlot> slots = {
        {"a", TypedSlot::FromSlot(a_slot)},
        {"b", TypedSlot::FromSlot(b_slot)},
        {"ignored_a", TypedSlot::FromSlot(ignored_a_slot)},
        {"ignored_b", TypedSlot::FromSlot(ignored_b_slot)},
    };
    EXPECT_THAT(input_loader->Bind(slots),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         HasSubstr("unknown inputs: ignored_a, ignored_b")));
    ASSERT_OK_AND_ASSIGN(auto bound_input_loader,
                         input_loader->PartialBind(&slots));
    EXPECT_THAT(slots,
                UnorderedElementsAre(
                    Pair("ignored_a", TypedSlot::FromSlot(ignored_a_slot)),
                    Pair("ignored_b", TypedSlot::FromSlot(ignored_b_slot))));
    FrameLayout memory_layout = std::move(layout_builder).Build();
    MemoryAllocation alloc(&memory_layout);
    ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
    EXPECT_EQ(alloc.frame().Get(a_slot), 5);
    EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
    EXPECT_EQ(alloc.frame().Get(ignored_a_slot), 0);
    EXPECT_EQ(alloc.frame().Get(ignored_b_slot), 0);
  }
}
// Build must reject duplicated accessor names and list each duplicate once.
TEST(InputLoaderTest, NameDuplicates) {
  auto accessors_tuple =
      std::make_tuple(std::make_pair(std::string("a"), GetAConstRef{}),
                      std::make_pair(std::string("c"), GetAConstRef{}),
                      std::make_pair(std::string("a"), GetBValue{}),
                      std::make_pair(std::string("b"), GetAConstRef{}),
                      std::make_pair(std::string("c"), GetAConstRef{}));
  using Loader = AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>;
  EXPECT_THAT(Loader::Build(accessors_tuple),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("accessors have duplicated names: a, c")));
}
// Binding a slot whose type differs from the accessor's result type must
// fail with a descriptive type-mismatch error.
TEST(InputLoaderTest, Errors) {
  auto accessors_tuple =
      std::make_tuple(std::make_pair(std::string("a"), GetAConstRef{}));
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      (AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>::Build(
          accessors_tuple)));
  {
    FrameLayout::Builder layout_builder;
    auto dslot = layout_builder.AddSlot<double>();
    EXPECT_THAT(
        input_loader
            ->Bind({
                {"a", TypedSlot::FromSlot(dslot)},
            })
            .status(),
        StatusIs(
            absl::StatusCode::kFailedPrecondition,
            HasSubstr(
                "slot types mismatch: a{expected:INT32, actual:FLOAT64}")));
  }
}
// The variadic CreateAccessorsInputLoader helper works with lambdas and loads
// each named value (including a derived value, b*b) into its slot.
TEST(InputLoaderTest, CreateAccessorsInputLoader) {
  auto i32 = GetQType<int32_t>();
  auto f64 = GetQType<double>();
  ASSERT_OK_AND_ASSIGN(auto input_loader,
                       CreateAccessorsInputLoader<TestStruct>(
                           "a", [](const TestStruct& s) { return s.a; },
                           "b", [](const TestStruct& s) { return s.b; },
                           "c", [](const TestStruct& s) { return s.b * s.b; }));
  EXPECT_THAT(input_loader,
              InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<int>();
  auto b_slot = layout_builder.AddSlot<double>();
  auto c_slot = layout_builder.AddSlot<double>();
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                           {"b", TypedSlot::FromSlot(b_slot)},
                           {"c", TypedSlot::FromSlot(c_slot)},
                       }));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
  EXPECT_EQ(alloc.frame().Get(a_slot), 5);
  EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
  EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
// The RawBufferFactory passed to the bound loader must be forwarded to
// factory-aware accessors: the accessor records whether it saw the exact
// factory instance.
TEST(InputLoaderTest, AccessorsInputLoaderBufferFactoryPropagated) {
  auto qbool = GetQType<bool>();
  UnsafeArenaBufferFactory global_factory(1000);
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestStruct>(
          "a", [&global_factory](const TestStruct&, RawBufferFactory* factory) {
            return &global_factory == factory;
          }));
  EXPECT_THAT(input_loader, InputLoaderSupports({{"a", qbool}}));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<bool>();
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                       }));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory));
  EXPECT_TRUE(alloc.frame().Get(a_slot));
  UnsafeArenaBufferFactory global_factory2(1000);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory2));
  EXPECT_FALSE(alloc.frame().Get(a_slot));
}
// Helpers that instantiate a loader with N (= 50) accessors to stress
// template instantiation (compile time and correctness at scale).
using PairStringFunction = std::pair<std::string, std::function<int(int)>>;
template <size_t>
using PairStringFunctionByInt = PairStringFunction;
// Builds a loader whose inputs are named "0".."N-1", each returning its index.
template <size_t... Is>
absl::StatusOr<std::unique_ptr<InputLoader<int>>>
CreateAccessorsInputLoaderManyInputs(std::index_sequence<Is...>) {
  using T = std::tuple<PairStringFunctionByInt<Is>...>;
  return CreateAccessorsInputLoaderFromTuple<int>(
      T{PairStringFunction(absl::StrCat(Is), [](int) { return Is; })...});
}
TEST(InputLoaderTest, CreateAccessorsInputLoaderCompilationStressTest) {
  constexpr size_t N = 50;
  ASSERT_OK_AND_ASSIGN(auto loader, CreateAccessorsInputLoaderManyInputs(
                                        std::make_index_sequence<N>()));
  EXPECT_THAT(loader, InputLoaderSupports({{"1", GetQType<int>()},
                                           {"2", GetQType<int>()},
                                           {"49", GetQType<int>()}}));
}
}
} | public:
static absl::StatusOr<InputLoaderPtr<Input>> Build(
NameAccessorsTuple accessors) {
auto output_types_in_order = CreateOutputTypesInOrder(
accessors,
std::make_index_sequence<std::tuple_size<NameAccessorsTuple>::value>{});
RETURN_IF_ERROR(ValidateDuplicatedNames(output_types_in_order));
return InputLoaderPtr<Input>(
static_cast<InputLoader<Input>*>(new AccessorsInputLoader(
std::move(accessors), std::move(output_types_in_order))));
} | TEST(InputLoaderTest, AccessorsInputLoader) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
auto accessors_tuple = std::make_tuple(
std::make_pair(std::string("a"), GetAConstRef{}),
std::make_pair(std::string("b"), GetBValue{}),
std::make_pair(std::string("b2"), GetBValueViaOutputArg{}),
std::make_pair(std::string("b3"),
GetBValueViaOutputArgWithRawBufferFactory{}));
ASSERT_OK_AND_ASSIGN(
auto input_loader,
(AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>::Build(
accessors_tuple)));
EXPECT_THAT(
input_loader,
InputLoaderSupports({{"a", i32}, {"b", f64}, {"b2", f64}, {"b3", f64}}));
{
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<double>();
auto a_slot = layout_builder.AddSlot<int>();
layout_builder.AddSlot<char>();
auto b_slot = layout_builder.AddSlot<double>();
layout_builder.AddSlot<std::string>();
auto b2_slot = layout_builder.AddSlot<double>();
layout_builder.AddSlot<int16_t>();
auto b3_slot = layout_builder.AddSlot<double>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"b2", TypedSlot::FromSlot(b2_slot)},
{"b3", TypedSlot::FromSlot(b3_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(b2_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(b3_slot), 3.5);
}
{
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<std::string>();
auto a_slot = layout_builder.AddSlot<int>();
layout_builder.AddSlot<char>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
}
{
FrameLayout::Builder layout_builder;
layout_builder.AddSlot<std::string>();
auto b_slot = layout_builder.AddSlot<double>();
layout_builder.AddSlot<char>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
input_loader->Bind({
{"b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
}
}
TEST(InputLoaderTest, NameDuplicates) {
auto accessors_tuple =
std::make_tuple(std::make_pair(std::string("a"), GetAConstRef{}),
std::make_pair(std::string("c"), GetAConstRef{}),
std::make_pair(std::string("a"), GetBValue{}),
std::make_pair(std::string("b"), GetAConstRef{}),
std::make_pair(std::string("c"), GetAConstRef{}));
using Loader = AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>;
EXPECT_THAT(Loader::Build(accessors_tuple),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("accessors have duplicated names: a, c")));
}
TEST(InputLoaderTest, Errors) {
auto accessors_tuple =
std::make_tuple(std::make_pair(std::string("a"), GetAConstRef{}));
ASSERT_OK_AND_ASSIGN(
auto input_loader,
(AccessorsInputLoader<TestStruct, decltype(accessors_tuple)>::Build(
accessors_tuple)));
{
FrameLayout::Builder layout_builder;
auto dslot = layout_builder.AddSlot<double>();
EXPECT_THAT(
input_loader
->Bind({
{"a", TypedSlot::FromSlot(dslot)},
})
.status(),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr(
"slot types mismatch: a{expected:INT32, actual:FLOAT64}")));
}
}
TEST(InputLoaderTest, CreateAccessorsInputLoader) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
ASSERT_OK_AND_ASSIGN(auto input_loader,
CreateAccessorsInputLoader<TestStruct>(
"a", [](const TestStruct& s) { return s.a; },
"b", [](const TestStruct& s) { return s.b; },
"c", [](const TestStruct& s) { return s.b * s.b; }));
EXPECT_THAT(input_loader,
InputLoaderSupports({{"a", i32}, {"b", f64}, {"c", f64}}));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
auto c_slot = layout_builder.AddSlot<double>();
ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
input_loader->Bind({
{"a", TypedSlot::FromSlot(a_slot)},
{"b", TypedSlot::FromSlot(b_slot)},
{"c", TypedSlot::FromSlot(c_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame()));
EXPECT_EQ(alloc.frame().Get(a_slot), 5);
EXPECT_EQ(alloc.frame().Get(b_slot), 3.5);
EXPECT_EQ(alloc.frame().Get(c_slot), 3.5 * 3.5);
}
// Verifies that the RawBufferFactory passed at call time is forwarded to
// factory-aware accessors: the accessor returns whether it received exactly
// the factory given to the bound loader invocation.
TEST(InputLoaderTest, AccessorsInputLoaderBufferFactoryPropagated) {
  auto qbool = GetQType<bool>();
  UnsafeArenaBufferFactory global_factory(1000);
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestStruct>(
          "a", [&global_factory](const TestStruct&, RawBufferFactory* factory) {
            return &global_factory == factory;
          }));
  EXPECT_THAT(input_loader, InputLoaderSupports({{"a", qbool}}));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<bool>();
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<TestStruct> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                       }));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  // Same factory as captured: accessor sees a match.
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory));
  EXPECT_TRUE(alloc.frame().Get(a_slot));
  // Different factory: accessor must observe the new one, not the old.
  UnsafeArenaBufferFactory global_factory2(1000);
  ASSERT_OK(bound_input_loader({5, 3.5}, alloc.frame(), &global_factory2));
  EXPECT_FALSE(alloc.frame().Get(a_slot));
}
#ifndef TENSORSTORE_UTIL_INT4_H_
#define TENSORSTORE_UTIL_INT4_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
class Int4Padded;
}
namespace std {
template <>
struct numeric_limits<::tensorstore::Int4Padded>;
}
namespace tensorstore {
namespace internal {
// Truncates `x` to its low 4 bits and sign-extends bit 3, i.e. maps the low
// nibble 0..15 onto the canonical int4 range [-8, 7] (mod-16 wraparound).
//
// Implementation note: `((x & 0xF) ^ 0x8) - 0x8` uses only well-defined
// integer arithmetic.  The previous shift-based form
// (`int8_t(uint8_t(x) << 4) >> 4`) relied on implementation-defined
// conversion of an out-of-range value to int8_t prior to C++20.
constexpr int8_t SignedTrunc4(int8_t x) {
  return static_cast<int8_t>(((x & 0x0F) ^ 0x08) - 0x08);
}
}
class Int4Padded {
public:
constexpr Int4Padded() : rep_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_convertible_v<T, int8_t>>>
constexpr explicit Int4Padded(T x)
: rep_(internal::SignedTrunc4(static_cast<int8_t>(x))) {}
constexpr operator int8_t() const {
return internal::SignedTrunc4(rep_);
}
Int4Padded& operator=(bool v) { return *this = static_cast<Int4Padded>(v); }
template <typename T>
std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> operator=(
T v) {
return *this = static_cast<Int4Padded>(v);
}
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(OP) \
friend Int4Padded operator OP(Int4Padded a, Int4Padded b) { \
return Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(Int4Padded a, T b) { \
return Int4Padded(a.rep_ OP b); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded> \
operator OP(T a, Int4Padded b) { \
return Int4Padded(a OP b.rep_); \
} \
#define TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(OP) \
friend Int4Padded& operator OP##=(Int4Padded& a, Int4Padded b) { \
return a = Int4Padded(a.rep_ OP b.rep_); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, Int4Padded&> \
operator OP##=(Int4Padded& a, T b) { \
return a = Int4Padded(a.rep_ OP b); \
} \
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(>>)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(>>)
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP
friend Int4Padded operator~(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(~a.rep_);
return result;
}
friend Int4Padded operator-(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(-a.rep_);
return result;
}
friend Int4Padded operator+(Int4Padded a) { return a; }
friend Int4Padded operator++(Int4Padded& a) {
a += Int4Padded(1);
return a;
}
friend Int4Padded operator--(Int4Padded& a) {
a -= Int4Padded(1);
return a;
}
friend Int4Padded operator++(Int4Padded& a, int) {
Int4Padded original_value = a;
++a;
return original_value;
}
friend Int4Padded operator--(Int4Padded& a, int) {
Int4Padded original_value = a;
--a;
return original_value;
}
template <template <typename U, typename V, typename... Args>
class ObjectType ,
template <typename U, typename... Args>
class ArrayType ,
class StringType , class BooleanType ,
class NumberIntegerType ,
class NumberUnsignedType ,
class NumberFloatType ,
template <typename U> class AllocatorType ,
template <typename T, typename SFINAE = void>
class JSONSerializer ,
class BinaryType >
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
Int4Padded v) {
j = static_cast<NumberIntegerType>(v);
}
constexpr friend bool operator==(const Int4Padded& a, const Int4Padded& b) {
return internal::SignedTrunc4(a.rep_) == internal::SignedTrunc4(b.rep_);
}
constexpr friend bool operator!=(const Int4Padded& a, const Int4Padded& b) {
return !(a == b);
}
struct bitcast_construct_t {};
explicit constexpr Int4Padded(bitcast_construct_t, int8_t rep) : rep_(rep) {}
int8_t rep_;
};
// Absolute value with int4 wraparound: abs(-8) overflows the int4 range and
// truncates back to -8 (std::abs yields 8, whose low nibble sign-extends
// to -8).
inline Int4Padded abs(Int4Padded x) {
  x.rep_ = internal::SignedTrunc4(::std::abs(x.rep_));
  return x;
}
// x raised to the power y, computed in floating point via std::pow and then
// truncated to the int4 range by the Int4Padded constructor.
inline Int4Padded pow(Int4Padded x, Int4Padded y) {
  return Int4Padded(std::pow(static_cast<int8_t>(x), static_cast<int8_t>(y)));
}
}
namespace std {
// numeric_limits specialization for the 4-bit signed type: range [-8, 7],
// 3 value bits plus sign, modulo (wraparound) arithmetic.
template <>
struct numeric_limits<tensorstore::Int4Padded> {
  static constexpr bool is_specialized = true;
  static constexpr bool is_signed = true;
  static constexpr bool is_integer = true;
  static constexpr bool is_exact = true;
  static constexpr bool has_infinity = false;
  static constexpr bool has_quiet_NaN = false;
  static constexpr bool has_signaling_NaN = false;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = true;
  static constexpr int digits = 3;
  static constexpr int digits10 = 0;
  static constexpr int max_digits10 = 0;
  static constexpr int radix = 2;
  // min/lowest/max are built via bitcast_construct_t so the exact bit
  // pattern is used without going through the truncating constructor.
  static constexpr tensorstore::Int4Padded min() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
  }
  static constexpr tensorstore::Int4Padded lowest() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{-8});
  }
  static constexpr tensorstore::Int4Padded max() {
    return tensorstore::Int4Padded(
        tensorstore::Int4Padded::bitcast_construct_t{}, int8_t{7});
  }
};
}
#endif | #include "tensorstore/util/int4.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace {
using Int4 = tensorstore::Int4Padded;
Int4 Bitcast(int8_t x) { return absl::bit_cast<Int4>(x); }
// int8 -> int4 conversion table, including out-of-range inputs that must
// wrap modulo 16 (e.g. 8 -> -8, -9 -> 7).
constexpr std::pair<int8_t, Int4> kInt8ToInt4[] = {
    {-10, Int4(6)},  {-9, Int4(7)},  {-8, Int4(-8)}, {-7, Int4(-7)},
    {-6, Int4(-6)},  {-5, Int4(-5)}, {-4, Int4(-4)}, {-3, Int4(-3)},
    {-2, Int4(-2)},  {-1, Int4(-1)}, {0, Int4(0)},   {1, Int4(1)},
    {2, Int4(2)},    {3, Int4(3)},   {4, Int4(4)},   {5, Int4(5)},
    {6, Int4(6)},    {7, Int4(7)},   {8, Int4(-8)},  {9, Int4(-7)},
    {10, Int4(-6)},
};
// Round-trip table covering every canonical int4 value [-8, 7] in order.
constexpr std::pair<Int4, int8_t> kInt4ToInt8[] = {
    {Int4(-8), -8}, {Int4(-7), -7}, {Int4(-6), -6}, {Int4(-5), -5},
    {Int4(-4), -4}, {Int4(-3), -3}, {Int4(-2), -2}, {Int4(-1), -1},
    {Int4(0), 0},   {Int4(1), 1},   {Int4(2), 2},   {Int4(3), 3},
    {Int4(4), 4},   {Int4(5), 5},   {Int4(6), 6},   {Int4(7), 7},
};
// Conversion to Int4 wraps out-of-range int8 values modulo 16.
TEST(Int4Test, Int8ToInt4) {
  for (const auto& [i8, i4] : kInt8ToInt4) {
    EXPECT_EQ(static_cast<Int4>(i8), i4);
  }
}
// Every canonical Int4 converts back to the matching int8.
TEST(Int4Test, Int4ToInt8) {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_EQ(static_cast<int8_t>(i4), i8);
  }
}
// Round-trip through an intermediate type X must be the identity for all
// in-range values.
template <typename X>
void TestInt4ToXToInt4() {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_EQ(static_cast<Int4>(static_cast<X>(i4)), i4);
  }
}
TEST(Int4Test, Int4ToInt32ToInt4) { TestInt4ToXToInt4<int32_t>(); }
TEST(Int4Test, Int4ToFloatToInt4) { TestInt4ToXToInt4<float>(); }
TEST(Int4Test, Int4ToDoubleToInt4) { TestInt4ToXToInt4<double>(); }
// Arithmetic wraps modulo 16 (e.g. 7+2 = 9 -> -7, 5*-7 = -35 -> -3);
// division and modulo truncate toward zero as for int8.
TEST(Int4Test, Arithmetic) {
  EXPECT_EQ(Int4(1) + Int4(2), Int4(3));
  EXPECT_EQ(Int4(7) + Int4(2), Int4(-7));
  EXPECT_EQ(Int4(3) - Int4(5), Int4(-2));
  EXPECT_EQ(Int4(5) * Int4(-7), Int4(-3));
  EXPECT_EQ(Int4(-8) / Int4(3), Int4(-2));
  EXPECT_EQ(Int4(-7) % Int4(3), Int4(-1));
}
// Bitwise ops act on the low nibble only.
TEST(Int4Test, BitwiseBinary) {
  EXPECT_EQ(Int4(0b0110) & Int4(0b1011), Int4(0b0010));
  EXPECT_EQ(Int4(0b0110) | Int4(0b1011), Int4(0b1111));
  EXPECT_EQ(Int4(0b0110) ^ Int4(0b1011), Int4(0b1101));
}
TEST(Int4Test, BitwiseUnaryInverse) {
  EXPECT_EQ(~Int4(0b1011), Int4(0b0100));
  EXPECT_EQ(~Int4(0b0110), Int4(0b1001));
}
// Left shifts drop bits out of the 4-bit window; right shifts are arithmetic
// (sign-extending), so negative values saturate to 0b1111 (-1) for large
// shift counts.  Both Int4 and plain integer shift amounts are exercised.
TEST(Int4Test, BitwiseShift) {
  EXPECT_EQ(Int4(0b0011) << Int4(0), Int4(0b0011));
  EXPECT_EQ(Int4(0b0011) << Int4(1), Int4(0b0110));
  EXPECT_EQ(Int4(0b0011) << Int4(2), Int4(0b1100));
  EXPECT_EQ(Int4(0b0011) << Int4(3), Int4(0b1000));
  EXPECT_EQ(Int4(0b0011) << Int4(4), Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << Int4(5), Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << int8_t{0}, Int4(0b0011));
  EXPECT_EQ(Int4(0b0011) << int8_t{1}, Int4(0b0110));
  EXPECT_EQ(Int4(0b0011) << int8_t{2}, Int4(0b1100));
  EXPECT_EQ(Int4(0b0011) << int8_t{3}, Int4(0b1000));
  EXPECT_EQ(Int4(0b0011) << int8_t{4}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0011) << int8_t{5}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(0), Int4(0b0100));
  EXPECT_EQ(Int4(0b0100) >> Int4(1), Int4(0b0010));
  EXPECT_EQ(Int4(0b0100) >> Int4(2), Int4(0b0001));
  EXPECT_EQ(Int4(0b0100) >> Int4(3), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(4), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> Int4(5), Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{0}, Int4(0b0100));
  EXPECT_EQ(Int4(0b0100) >> int8_t{1}, Int4(0b0010));
  EXPECT_EQ(Int4(0b0100) >> int8_t{2}, Int4(0b0001));
  EXPECT_EQ(Int4(0b0100) >> int8_t{3}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{4}, Int4(0b0000));
  EXPECT_EQ(Int4(0b0100) >> int8_t{5}, Int4(0b0000));
  // Negative value (0b1010 == -6): arithmetic shift fills with sign bits.
  EXPECT_EQ(Int4(0b1010) >> Int4(0), Int4(0b1010));
  EXPECT_EQ(Int4(0b1010) >> Int4(1), Int4(0b1101));
  EXPECT_EQ(Int4(0b1010) >> Int4(2), Int4(0b1110));
  EXPECT_EQ(Int4(0b1010) >> Int4(3), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> Int4(4), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> Int4(5), Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{0}, Int4(0b1010));
  EXPECT_EQ(Int4(0b1010) >> int8_t{1}, Int4(0b1101));
  EXPECT_EQ(Int4(0b1010) >> int8_t{2}, Int4(0b1110));
  EXPECT_EQ(Int4(0b1010) >> int8_t{3}, Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{4}, Int4(0b1111));
  EXPECT_EQ(Int4(0b1010) >> int8_t{5}, Int4(0b1111));
}
// abs() is the identity for non-negative values; abs(-8) wraps back to -8
// because +8 is not representable in int4.
TEST(Int4Test, Abs) {
  EXPECT_EQ(abs(Int4(7)), Int4(7));
  EXPECT_EQ(abs(Int4(0)), Int4(0));
  EXPECT_EQ(abs(Int4(-7)), Int4(7));
  EXPECT_EQ(abs(Int4(-8)), Int4(-8));
}
TEST(Int4Test, Pow) {
  EXPECT_EQ(pow(Int4(2), Int4(0)), Int4(1));
  EXPECT_EQ(pow(Int4(2), Int4(1)), Int4(2));
  EXPECT_EQ(pow(Int4(2), Int4(2)), Int4(4));
}
// Ordering follows the canonical int8 value for every pair of distinct
// values; kInt4ToInt8 is sorted ascending, so j > i implies b > a.
TEST(Int4Test, Comparison) {
  for (int i = 0; i <= 15; i++) {
    const Int4 a = kInt4ToInt8[i].first;
    EXPECT_EQ(a, a);
    EXPECT_LE(a, a);
    EXPECT_GE(a, a);
    for (int j = i + 1; j <= 15; j++) {
      const Int4 b = kInt4ToInt8[j].first;
      EXPECT_NE(a, b);
      EXPECT_LT(a, b);
      EXPECT_LE(a, b);
      EXPECT_GT(b, a);
      EXPECT_GE(b, a);
    }
  }
}
// Bit patterns that differ only in the padding (high) nibble must compare
// equal to each other and to the canonical value.
TEST(Int4Test, EquivalentRepresentationsCompareEqual) {
  for (int low_nibble = 0; low_nibble <= 15; low_nibble++) {
    const Int4 answer = Int4(low_nibble);
    for (int high_nibble_a = 0; high_nibble_a <= 15; high_nibble_a++) {
      for (int high_nibble_b = 0; high_nibble_b <= 15; high_nibble_b++) {
        const int8_t a = low_nibble | (high_nibble_a << 4);
        const int8_t b = low_nibble | (high_nibble_b << 4);
        const Int4 a4 = Bitcast(a);
        const Int4 b4 = Bitcast(b);
        EXPECT_EQ(a4, answer);
        EXPECT_EQ(b4, answer);
        EXPECT_EQ(a4, b4);
      }
    }
  }
}
// Relational operators ignore the padding nibble (0xD3 vs 0xE5 compares the
// low nibbles 3 and 5 as sign-extended int4 values).
TEST(Int4Test, NonCanonicalRepresentationsCompareCorrectly) {
  EXPECT_LT(Bitcast(0xD3), Bitcast(0xE5));
  EXPECT_LE(Bitcast(0xD3), Bitcast(0xE5));
  EXPECT_GT(Bitcast(0x33), Bitcast(0x4A));
  EXPECT_GE(Bitcast(0x33), Bitcast(0x4A));
}
// to_json emits the canonical integer value for every int4.
TEST(Int4Test, JsonConversion) {
  for (const auto& [i4, i8] : kInt4ToInt8) {
    EXPECT_THAT(::nlohmann::json(i4), tensorstore::MatchesJson(i8));
  }
}
} | \
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(/)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(%)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(&)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(|)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(^)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(<<)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP(>>)
TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP(>>)
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_INT4_PADDED_ARITHMETIC_ASSIGN_OP
friend Int4Padded operator~(Int4Padded a) {
Int4Padded result;
result.rep_ = internal::SignedTrunc4(~a.rep_);
return result;
} | constexpr std::pair<int8_t, Int4> kInt8ToInt4[] = {
{-10, Int4(6)}, {-9, Int4(7)}
, {-8, Int4(-8)}, {-7, Int4(-7)}
,
{-6, Int4(-6)}, {-5, Int4(-5)}
, {-4, Int4(-4)}, {-3, Int4(-3)}
,
{-2, Int4(-2)}, {-1, Int4(-1)}
, {8, Int4(-8)}, {9, Int4(-7)}
,
{10, Int4(-6)},
};
constexpr std::pair<Int4, int8_t> kInt4ToInt8[] = {
{Int4(-8), -8}, {Int4(-7), -7}, {Int4(-6), -6}, {Int4(-5), -5},
{Int4(-4), -4}, {Int4(-3), -3}, {Int4(-2), -2}, {Int4(-1), -1},
{Int4(0), 0}, {Int4(1), 1}, {Int4(2), 2}, {Int4(3), 3},
{Int4(4), 4}, {Int4(5), 5}, {Int4(6), 6}, {Int4(7), 7},
}
EXPECT_EQ(Int4(7) + Int4(2), Int4(-7));
EXPECT_EQ(Int4(3) - Int4(5), Int4(-2));
EXPECT_EQ(Int4(5) * Int4(-7), Int4(-3));
EXPECT_EQ(Int4(-8) / Int4(3), Int4(-2));
EXPECT_EQ(Int4(-7) % Int4(3), Int4(-1));
}
TEST(Int4Test, BitwiseBinary) {
EXPECT_EQ(Int4(0b0110) & Int4(0b1011), Int4(0b0010));
EXPECT_EQ(Int4(0b0110) | Int4(0b1011), Int4(0b1111));
EXPECT_EQ(Int4(0b0110) ^ Int4(0b1011), Int4(0b1101));
}
EXPECT_EQ(abs(Int4(-7)), Int4(7));
EXPECT_EQ(abs(Int4(-8)), Int4(-8));
}
TEST(Int4Test, Pow) {
EXPECT_EQ(pow(Int4(2), Int4(0)), Int4(1));
EXPECT_EQ(pow(Int4(2), Int4(1)), Int4(2));
EXPECT_EQ(pow(Int4(2), Int4(2)), Int4(4));
}
constexpr std::pair<int8_t, Int4> kInt8ToInt4[] = {
{-10, Int4(6)}, {-9, Int4(7)}
, {-8, Int4(-8)}, {-7, Int4(-7)}
,
{-6, Int4(-6)}, {-5, Int4(-5)}
, {-4, Int4(-4)}, {-3, Int4(-3)}
,
{-2, Int4(-2)}, {-1, Int4(-1)}
, {8, Int4(-8)}, {9, Int4(-7)}
,
{10, Int4(-6)},
};
constexpr std::pair<Int4, int8_t> kInt4ToInt8[] = {
{Int4(-8), -8}, {Int4(-7), -7}, {Int4(-6), -6}, {Int4(-5), -5},
{Int4(-4), -4}, {Int4(-3), -3}, {Int4(-2), -2}, {Int4(-1), -1},
{Int4(0), 0}, {Int4(1), 1}, {Int4(2), 2}, {Int4(3), 3},
{Int4(4), 4}, {Int4(5), 5}, {Int4(6), 6}, {Int4(7), 7},
}
EXPECT_EQ(Int4(7) + Int4(2), Int4(-7));
EXPECT_EQ(Int4(3) - Int4(5), Int4(-2));
EXPECT_EQ(Int4(5) * Int4(-7), Int4(-3));
EXPECT_EQ(Int4(-8) / Int4(3), Int4(-2));
EXPECT_EQ(Int4(-7) % Int4(3), Int4(-1));
}
TEST(Int4Test, BitwiseBinary) {
EXPECT_EQ(Int4(0b0110) & Int4(0b1011), Int4(0b0010));
EXPECT_EQ(Int4(0b0110) | Int4(0b1011), Int4(0b1111));
EXPECT_EQ(Int4(0b0110) ^ Int4(0b1011), Int4(0b1101));
}
EXPECT_EQ(abs(Int4(-7)), Int4(7));
EXPECT_EQ(abs(Int4(-8)), Int4(-8));
}
TEST(Int4Test, Pow) {
EXPECT_EQ(pow(Int4(2), Int4(0)), Int4(1));
EXPECT_EQ(pow(Int4(2), Int4(1)), Int4(2));
EXPECT_EQ(pow(Int4(2), Int4(2)), Int4(4));
} |
#ifndef TENSORFLOW_TSL_PLATFORM_REFCOUNT_H_
#define TENSORFLOW_TSL_PLATFORM_REFCOUNT_H_
#include <atomic>
#include <map>
#include <memory>
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tsl {
namespace core {
// Intrusively reference-counted base class.  Objects start with a count of
// one; Unref() deletes the object when the count reaches zero.  Ref/Unref
// are const so that const pointers can share ownership.
class RefCounted {
 public:
  RefCounted();
  // Increments the reference count; requires the count to be positive.
  void Ref() const;
  // Decrements the count; deletes `this` and returns true when it hits zero.
  bool Unref() const;
  // Current reference count (acquire load).
  int_fast32_t RefCount() const;
  bool RefCountIsOne() const;

 protected:
  virtual ~RefCounted();
  // Increments the count only if it is non-zero; used to safely revive a
  // reference from a weak pointer.
  bool TryRef() const;
  // Hook invoked by Unref() just before deletion (see WeakRefCounted).
  virtual void NotifyDeleted() const;

 private:
  mutable std::atomic_int_fast32_t ref_;
  RefCounted(const RefCounted&) = delete;
  void operator=(const RefCounted&) = delete;
};
// Deleter for std::unique_ptr that releases a reference instead of deleting.
struct RefCountDeleter {
  void operator()(const RefCounted* o) const { o->Unref(); }
};
template <typename T>
class RefCountPtr;
// Returns a RefCountPtr holding a new reference to `ptr` (or an empty
// pointer if `ptr` is null).  The caller's own reference is unaffected.
template <typename T>
ABSL_MUST_USE_RESULT RefCountPtr<T> GetNewRef(T* ptr) {
  static_assert(std::is_base_of<RefCounted, T>::value);
  if (ptr == nullptr) return RefCountPtr<T>();
  ptr->Ref();
  RefCountPtr<T> ret(ptr);
  return ret;
}
// Owning smart pointer for RefCounted objects: a unique_ptr whose deleter
// calls Unref() rather than delete.
template <typename T>
class RefCountPtr : public std::unique_ptr<T, RefCountDeleter> {
 public:
  using std::unique_ptr<T, RefCountDeleter>::unique_ptr;

  // Returns a second RefCountPtr sharing the object (adds one reference).
  ABSL_MUST_USE_RESULT RefCountPtr GetNewRef() const {
    if (this->get() == nullptr) return RefCountPtr<T>();
    this->get()->Ref();
    return RefCountPtr<T>(this->get());
  }
};
// RAII helper that calls Unref() on a (possibly null) object at scope exit.
class ScopedUnref {
 public:
  explicit ScopedUnref(const RefCounted* o) : obj_(o) {}
  ~ScopedUnref() {
    if (obj_) obj_->Unref();
  }

 private:
  const RefCounted* obj_;
  ScopedUnref(const ScopedUnref&) = delete;
  void operator=(const ScopedUnref&) = delete;
};
template <typename T>
class WeakPtr;
using WeakNotifyFn = std::function<void()>;
// RefCounted subclass that additionally supports WeakPtr observers.
//
// Weak references are tracked through a separate, itself ref-counted,
// WeakRefData record shared by this object and all of its WeakPtrs.  When
// the object is deleted, NotifyDeleted() clears the back-pointer and runs
// all registered notify functions.
class WeakRefCounted : public RefCounted {
 public:
  // Number of live weak references.  `data_` always holds one reference on
  // behalf of this object itself, hence the -1.
  int WeakRefCount() const {
    return data_->RefCount() - 1;
  }

 protected:
  void NotifyDeleted() const override { data_->Notify(); }

 private:
  struct WeakRefData : public RefCounted {
    explicit WeakRefData(WeakRefCounted* ptr) : ptr(ptr), next_notifier_id(1) {}
    mutable mutex mu;
    WeakRefCounted* ptr TF_GUARDED_BY(mu);  // null once the object is deleted
    std::map<int, WeakNotifyFn> notifiers;
    int next_notifier_id;
    // Runs and removes every registered notifier, then clears `ptr`.  Each
    // notify_fn is invoked with `mu` released so it may re-enter this record
    // (e.g. a WeakPtr destructor calling RemoveNotifier).
    void Notify() {
      mutex_lock ml(mu);
      while (!notifiers.empty()) {
        auto iter = notifiers.begin();
        WeakNotifyFn notify_fn = std::move(iter->second);
        notifiers.erase(iter);
        mu.unlock();
        notify_fn();
        mu.lock();
      }
      ptr = nullptr;
    }
    // Returns the object with an additional strong reference, or null if it
    // has already been deleted.
    WeakRefCounted* GetNewRef() {
      mutex_lock ml(mu);
      if (ptr != nullptr && ptr->TryRef()) {
        return ptr;
      }
      return nullptr;
    }
    // Registers `notify_fn` and returns its id, or 0 if the object is gone.
    int AddNotifier(WeakNotifyFn notify_fn) {
      mutex_lock ml(mu);
      if (ptr == nullptr) {
        return 0;
      }
      int notifier_id = next_notifier_id++;
      notifiers.emplace(notifier_id, std::move(notify_fn));
      return notifier_id;
    }
    // Registers a copy of the notifier identified by `notifier_id` under a
    // fresh id (used when a WeakPtr is copied).  Returns 0 if absent.
    // Note: the new id is deliberately NOT named `notifier_id` — the
    // original shadowed the parameter here.
    int DupNotifier(int notifier_id) {
      mutex_lock ml(mu);
      auto iter = notifiers.find(notifier_id);
      if (iter != notifiers.end()) {
        int new_notifier_id = next_notifier_id++;
        notifiers.emplace(new_notifier_id, iter->second);
        return new_notifier_id;
      }
      return 0;
    }
    void RemoveNotifier(int notifier_id) {
      mutex_lock ml(mu);
      notifiers.erase(notifier_id);
    }
  };
  mutable RefCountPtr<WeakRefData> data_{new WeakRefData(this)};
  template <typename T>
  friend class WeakPtr;
  friend struct WeakRefData;
};
template <typename T>
class WeakPtr {
public:
explicit WeakPtr(WeakRefCounted* ptr = nullptr,
WeakNotifyFn notify_fn = nullptr)
: data_(nullptr), notifier_id_(0) {
if (ptr != nullptr) {
ptr->data_->Ref();
data_.reset(ptr->data_.get());
if (notify_fn) {
notifier_id_ = data_->AddNotifier(notify_fn);
}
}
}
~WeakPtr() {
if (data_ != nullptr && notifier_id_ != 0) {
data_->RemoveNotifier(notifier_id_);
}
}
WeakPtr(const WeakPtr& other) { operator=(other); }
WeakPtr& operator=(const WeakPtr& other) {
if (data_ != nullptr && notifier_id_ != 0) {
data_->RemoveNotifier(notifier_id_);
}
other.data_->Ref();
data_.reset(other.data_.get());
notifier_id_ = data_->DupNotifier(other.notifier_id_);
return *this;
}
WeakPtr(WeakPtr&& other) noexcept {
data_ = std::move(other.data_);
notifier_id_ = other.notifier_id_;
other.notifier_id_ = 0;
}
WeakPtr& operator=(WeakPtr&& other) noexcept {
if (this != &other) {
if (data_ != nullptr && notifier_id_ != 0) {
data_->RemoveNotifier(notifier_id_);
}
data_ = std::move(other.data_);
notifier_id_ = other.notifier_id_;
other.notifier_id_ = 0;
}
return *this;
}
RefCountPtr<T> GetNewRef() const {
RefCountPtr<T> ref;
if (data_ != nullptr) {
WeakRefCounted* ptr = data_->GetNewRef();
ref.reset(static_cast<T*>(ptr));
}
return std::move(ref);
}
private:
RefCountPtr<WeakRefCounted::WeakRefData> data_;
int notifier_id_;
};
// Objects are born with a reference count of one (owned by the creator).
inline RefCounted::RefCounted() : ref_(1) {}
inline RefCounted::~RefCounted() {
  // Destruction must only happen through Unref() decrementing to zero.
  DCHECK_EQ(ref_.load(), 0);
}
inline void RefCounted::Ref() const {
  // Relaxed order suffices: acquiring a new reference requires an existing
  // one, which already orders accesses.
  int_fast32_t old_ref = ref_.fetch_add(1, std::memory_order_relaxed);
  DCHECK_GT(old_ref, 0);
}
inline bool RefCounted::TryRef() const {
  // CAS loop that only increments while the count is non-zero, so a dying
  // object (count already 0) cannot be revived.
  int_fast32_t old_ref = ref_.load();
  while (old_ref != 0) {
    if (ref_.compare_exchange_weak(old_ref, old_ref + 1)) {
      return true;
    }
  }
  return false;
}
inline bool RefCounted::Unref() const {
  DCHECK_GT(ref_.load(), 0);
  // acq_rel: release our writes to other threads, acquire theirs before the
  // final decrementer runs NotifyDeleted() and the destructor.
  if (ref_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    NotifyDeleted();
    delete this;
    return true;  // this call destroyed the object
  }
  return false;
}
inline int_fast32_t RefCounted::RefCount() const {
  return ref_.load(std::memory_order_acquire);
}
// Default hook: no-op; WeakRefCounted overrides it to fire notifiers.
inline void RefCounted::NotifyDeleted() const {}
inline bool RefCounted::RefCountIsOne() const {
  return (ref_.load(std::memory_order_acquire) == 1);
}
}
}
#endif | #include "tsl/platform/refcount.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace core {
namespace {
// Fixture resetting the global construction/destruction counters used by
// MyRef before each test.
class RefTest : public ::testing::Test {
 public:
  RefTest() {
    constructed_ = 0;
    destroyed_ = 0;
  }
  static int constructed_;
  static int destroyed_;
};
int RefTest::constructed_;
int RefTest::destroyed_;
// Minimal RefCounted subclass that records its lifetime in the counters.
class MyRef : public RefCounted {
 public:
  MyRef() { RefTest::constructed_++; }
  ~MyRef() override { RefTest::destroyed_++; }
};
// A freshly created object has one reference; the first Unref destroys it.
TEST_F(RefTest, New) {
  MyRef* ref = new MyRef;
  ASSERT_EQ(1, constructed_);
  ASSERT_EQ(0, destroyed_);
  ref->Unref();
  ASSERT_EQ(1, constructed_);
  ASSERT_EQ(1, destroyed_);
}
// Ref/Unref pairs balance; destruction happens only on the final Unref.
TEST_F(RefTest, RefUnref) {
  MyRef* ref = new MyRef;
  ASSERT_EQ(1, constructed_);
  ASSERT_EQ(0, destroyed_);
  ref->Ref();
  ASSERT_EQ(0, destroyed_);
  ref->Unref();
  ASSERT_EQ(0, destroyed_);
  ref->Unref();
  ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, RefCountOne) {
  MyRef* ref = new MyRef;
  ASSERT_TRUE(ref->RefCountIsOne());
  ref->Unref();
}
TEST_F(RefTest, RefCountNotOne) {
  MyRef* ref = new MyRef;
  ref->Ref();
  ASSERT_FALSE(ref->RefCountIsOne());
  ref->Unref();
  ref->Unref();
}
// Ref/Unref are const-qualified, so const pointers participate in ownership.
TEST_F(RefTest, ConstRefUnref) {
  const MyRef* cref = new MyRef;
  ASSERT_EQ(1, constructed_);
  ASSERT_EQ(0, destroyed_);
  cref->Ref();
  ASSERT_EQ(0, destroyed_);
  cref->Unref();
  ASSERT_EQ(0, destroyed_);
  cref->Unref();
  ASSERT_EQ(1, destroyed_);
}
// Unref reports whether this call destroyed the object.
TEST_F(RefTest, ReturnOfUnref) {
  MyRef* ref = new MyRef;
  ref->Ref();
  EXPECT_FALSE(ref->Unref());
  EXPECT_TRUE(ref->Unref());
}
TEST_F(RefTest, ScopedUnref) {
  { ScopedUnref unref(new MyRef); }
  EXPECT_EQ(destroyed_, 1);
}
// ScopedUnref tolerates a null pointer (no Unref call).
TEST_F(RefTest, ScopedUnref_Nullptr) {
  { ScopedUnref unref(nullptr); }
  EXPECT_EQ(destroyed_, 0);
}
// RefCountPtr::GetNewRef adds a reference that its destructor releases.
TEST_F(RefTest, RefCountPtr) {
  const RefCountPtr<MyRef> cref = RefCountPtr<MyRef>(new MyRef);
  ASSERT_TRUE(cref.get() != nullptr);
  ASSERT_EQ(cref->RefCount(), 1);
  {
    const RefCountPtr<MyRef> cref2 = cref.GetNewRef();
    ASSERT_EQ(cref->RefCount(), 2);
  }
  ASSERT_EQ(cref->RefCount(), 1);
}
// WeakRefCounted subclass that counts destructor invocations via an
// externally supplied counter (defaults to a shared unused one).
class ObjType : public WeakRefCounted {
 public:
  ObjType() : ObjType(unused_dtor_called_) {}
  explicit ObjType(int& dtor_called) : dtor_called_(dtor_called) {}
  ~ObjType() override { dtor_called_++; }
  int& dtor_called_;
  static int unused_dtor_called_;
};
int ObjType::unused_dtor_called_ = 0;
// A WeakPtr yields a strong reference while the object lives and nullptr
// after the last strong reference is dropped.
TEST(WeakPtr, SingleThread) {
  auto obj = new ObjType();
  WeakPtr<ObjType> weakptr(obj);
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 1);
  EXPECT_NE(weakptr.GetNewRef(), nullptr);
  obj->Unref();
  EXPECT_EQ(weakptr.GetNewRef(), nullptr);
}
// Races GetNewRef() against the object's final Unref across many rounds;
// each racing thread must either obtain a valid strong ref or observe that
// the object was already destroyed.
TEST(WeakPtr, MultiThreadedWeakRef) {
  std::atomic<int> hit_destructed{0};
  auto env = Env::Default();
  for (int i = 0; i < 100; i++) {
    auto obj = new ObjType();
    WeakPtr<ObjType> weakptr(obj);
    bool obj_destructed = false;
    EXPECT_EQ(obj->WeakRefCount(), 1);
    auto fn = [&]() {
      auto ref = weakptr.GetNewRef();
      if (ref != nullptr) {
        EXPECT_EQ(ref.get(), obj);
        EXPECT_EQ(ref->WeakRefCount(), 1);
        EXPECT_GE(ref->RefCount(), 1);
      } else {
        hit_destructed++;
        EXPECT_TRUE(obj_destructed);
      }
    };
    auto t1 = env->StartThread(ThreadOptions{}, "thread-1", fn);
    auto t2 = env->StartThread(ThreadOptions{}, "thread-2", fn);
    env->SleepForMicroseconds(10);
    obj_destructed = true;
    obj->Unref();
    delete t1;
    delete t2;
    EXPECT_EQ(weakptr.GetNewRef(), nullptr);
  }
  // The test is timing-dependent; warn (not fail) if only one branch ran.
  if (hit_destructed == 0) {
    LOG(WARNING) << "The destructed weakref test branch is not exercised.";
  }
  if (hit_destructed == 200) {
    LOG(WARNING) << "The valid weakref test branch is not exercised.";
  }
}
// Every registered notify_fn fires exactly once when the object dies.
TEST(WeakPtr, NotifyCalled) {
  auto obj = new ObjType();
  int num_calls1 = 0;
  int num_calls2 = 0;
  auto notify_fn1 = [&num_calls1]() { num_calls1++; };
  auto notify_fn2 = [&num_calls2]() { num_calls2++; };
  WeakPtr<ObjType> weakptr1(obj, notify_fn1);
  WeakPtr<ObjType> weakptr2(obj, notify_fn2);
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 2);
  EXPECT_NE(weakptr1.GetNewRef(), nullptr);
  EXPECT_NE(weakptr2.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 0);
  EXPECT_EQ(num_calls2, 0);
  obj->Unref();
  EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
  EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 1);
  EXPECT_EQ(num_calls2, 1);
}
// Notifiers run before the object's destructor (NotifyDeleted precedes
// `delete this` in Unref).
TEST(WeakPtr, NotifyCalledBeforeDestructor) {
  int dtor_called = 0;
  auto obj = new ObjType(dtor_called);
  int num_calls1 = 0;
  auto notify_fn1 = [&num_calls1, &dtor_called]() {
    num_calls1++;
    EXPECT_EQ(dtor_called, 0);
  };
  WeakPtr<ObjType> weakptr1(obj, notify_fn1);
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 1);
  EXPECT_NE(weakptr1.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 0);
  obj->Unref();
  EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 1);
  EXPECT_EQ(dtor_called, 1);
}
// Copying a WeakPtr duplicates its notifier and copy-assignment replaces the
// target's previous notifier, so notify_fn1 fires for weakptr1 + both copies
// (3x) and the overwritten notify_fn2 never fires.
TEST(WeakPtr, CopyTargetCalled) {
  auto obj = new ObjType();
  int num_calls1 = 0;
  int num_calls2 = 0;
  auto notify_fn1 = [&num_calls1]() { num_calls1++; };
  auto notify_fn2 = [&num_calls2]() { num_calls2++; };
  WeakPtr<ObjType> weakptr1(obj, notify_fn1);
  WeakPtr<ObjType> weakptr2(obj, notify_fn2);
  WeakPtr<ObjType> weakptr3(weakptr1);
  weakptr2 = weakptr1;
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 3);
  EXPECT_NE(weakptr2.GetNewRef(), nullptr);
  EXPECT_NE(weakptr3.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 0);
  EXPECT_EQ(num_calls2, 0);
  obj->Unref();
  EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
  EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 3);
  EXPECT_EQ(num_calls2, 0);
}
// Moving transfers (not duplicates) the notifier: each surviving notifier
// fires once and the move-assigned-away notify_fn2 never fires.
TEST(WeakPtr, MoveTargetNotCalled) {
  auto obj = new ObjType();
  int num_calls1 = 0;
  int num_calls2 = 0;
  int num_calls3 = 0;
  auto notify_fn1 = [&num_calls1]() { num_calls1++; };
  auto notify_fn2 = [&num_calls2]() { num_calls2++; };
  auto notify_fn3 = [&num_calls3]() { num_calls3++; };
  WeakPtr<ObjType> weakptr1(obj, notify_fn1);
  WeakPtr<ObjType> weakptr2(obj, notify_fn2);
  WeakPtr<ObjType> weakptr3(WeakPtr<ObjType>(obj, notify_fn3));
  weakptr2 = std::move(weakptr1);
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 2);
  EXPECT_NE(weakptr2.GetNewRef(), nullptr);
  EXPECT_NE(weakptr3.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 0);
  EXPECT_EQ(num_calls2, 0);
  EXPECT_EQ(num_calls3, 0);
  obj->Unref();
  EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
  EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
  EXPECT_EQ(num_calls1, 1);
  EXPECT_EQ(num_calls2, 0);
  EXPECT_EQ(num_calls3, 1);
}
// A destroyed WeakPtr unregisters its notifier; it must not fire later.
TEST(WeakPtr, DestroyedNotifyNotCalled) {
  auto obj = new ObjType();
  int num_calls = 0;
  auto notify_fn = [&num_calls]() { num_calls++; };
  { WeakPtr<ObjType> weakptr(obj, notify_fn); }
  ASSERT_TRUE(obj->RefCountIsOne());
  EXPECT_EQ(obj->WeakRefCount(), 0);
  EXPECT_EQ(num_calls, 0);
  obj->Unref();
  EXPECT_EQ(num_calls, 0);
}
}
}
} | ~ScopedUnref() {
if (obj_) obj_->Unref();
} | TEST_F(RefTest, ScopedUnref) {
{ ScopedUnref unref(new MyRef); }
EXPECT_EQ(destroyed_, 1);
} |
#include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr int64_t kMaxSize = 16;
// Renames `node` to a graph-unique "<name>/replicate"-derived name.
void SetUniqueName(Graph* graph, Node* node) {
  node->set_name(graph->NewName(absl::StrCat(node->name(), "/replicate")));
}
// Returns true iff `node` has at least one outgoing control edge.
// std::any_of states the intent directly instead of find_if + end compare.
bool HasControlOut(Node* node) {
  return std::any_of(node->out_edges().begin(), node->out_edges().end(),
                     [](const auto& e) { return e->IsControlEdge(); });
}
// Returns true iff the node's assigned device parses and has type "CPU".
bool HasCpuDevice(const Node* node) {
  DeviceNameUtils::ParsedName device;
  if (!DeviceNameUtils::ParseFullName(node->assigned_device_name(), &device))
    return false;
  return device.type == "CPU";
}
// Maps `device_name` to a host CPU device name in `*host_device_name`.
// Normally this is CPU:0 of the same job/replica/task.  When the aggressive
// constant replication flag is set and the device is already a CPU, the
// original name (keeping its device id) is preserved instead.
Status DeviceNameToCpuDeviceNameWithDeviceId(const string& device_name,
                                             string* host_device_name) {
  DeviceNameUtils::ParsedName device;
  if (!DeviceNameUtils::ParseFullName(device_name, &device)) {
    return absl::InternalError(
        absl::StrCat("Could not parse device name ", device_name));
  }
  if (flags::Global().enable_aggressive_constant_replication.value() &&
      device.type == "CPU") {
    *host_device_name = device_name;
  } else {
    device.type = "CPU";
    device.has_type = true;
    device.id = 0;
    device.has_id = true;
    *host_device_name = DeviceNameUtils::ParsedNameToString(device);
  }
  return absl::OkStatus();
}
// Computes the host CPU device for consumer node `dst`; fails with Aborted
// if `dst` has not been assigned a device yet.
Status GetDestinationCpuDevice(const Node* dst, std::string* device) {
  if (!dst->has_assigned_device_name())
    return absl::AbortedError(
        absl::StrCat("Node name: ", dst->name(), " has no assigned device."));
  return DeviceNameToCpuDeviceNameWithDeviceId(dst->assigned_device_name(),
                                               device);
}
// Groups `node`'s outgoing edges by the host CPU device of each destination,
// filling `device_to_edges` (btree_map keeps the device order deterministic).
Status GetSuccessorEdges(
    Node* node,
    absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
  for (const auto& edge : node->out_edges()) {
    const Node* dst = edge->dst();
    std::string device;
    TF_RETURN_IF_ERROR(GetDestinationCpuDevice(dst, &device));
    // operator[] default-constructs the vector on first use — one lookup
    // instead of the previous count() + insert() + operator[] triple.
    device_to_edges[device].push_back(edge);
  }
  return absl::OkStatus();
}
// Replaces `node` with one copy per device in `device_to_edges`: each copy
// is uniquely named, pinned to its device, and wired to the successors on
// that device.  Incoming dependencies of the original are mirrored onto
// every copy as control edges; the original node is then removed.
void ReplicateToEachDevice(
    Graph* graph, Node* node,
    absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
  for (const auto& pair : device_to_edges) {
    Node* copy = graph->CopyNode(node);
    SetUniqueName(graph, copy);
    const std::string device = pair.first;
    copy->set_assigned_device_name(device);
    // Rewire this device's consumer edges to the copy.
    for (const Edge* edge : pair.second) {
      graph->AddEdge(copy, edge->src_output(), edge->dst(), edge->dst_input());
    }
    // Mirror the original node's predecessors as control edges.
    for (Node* src : node->in_nodes()) {
      graph->AddControlEdge(src, copy, true);
    }
  }
  graph->RemoveNode(node);
}
}
// Graph optimization pass: replicates small CPU-assigned constants that feed
// consumers on multiple host devices, so each device reads a local copy.
// A constant is replicated only if it (1) has multiple out edges, (2) has no
// control-edge consumers, (3) has at most kMaxSize elements, (4) is assigned
// to a CPU device, and (5) its successors span more than one host device.
Status ReplicateConstantsPass::Run(
    const GraphOptimizationPassOptions& options) {
  VLOG(1) << "replicate_constants_pass will replicate constants with "
             "number-of-elements <= "
          << kMaxSize;
  if (options.graph == nullptr) {
    VLOG(1) << "No graph in replicate_constants_pass.";
    return absl::OkStatus();
  }
  Graph* graph = options.graph->get();
  if (VLOG_IS_ON(1)) {
    VLOG(1) << DumpGraphToFile("before_replicate_constants_pass", *graph,
                               options.flib_def);
  }
  // Track the element-count range of constants skipped for being too large,
  // purely for the summary log below.
  int64_t min_skipped = std::numeric_limits<int64_t>::max();
  int64_t max_skipped = std::numeric_limits<int64_t>::min();
  for (Node* node : graph->nodes()) {
    if (!node->IsConstant()) continue;
    if (node->out_edges().size() <= 1) continue;
    if (HasControlOut(node)) continue;
    const TensorProto* value = nullptr;
    TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "value", &value));
    TF_ASSIGN_OR_RETURN(TensorShape shape,
                        TensorShape::BuildTensorShape(value->tensor_shape()));
    if (shape.num_elements() > kMaxSize) {
      min_skipped = std::min(min_skipped, shape.num_elements());
      max_skipped = std::max(max_skipped, shape.num_elements());
      continue;
    }
    if (!node->has_assigned_device_name()) continue;
    if (!HasCpuDevice(node)) continue;
    absl::btree_map<std::string, std::vector<const Edge*>> device_to_edges;
    TF_RETURN_IF_ERROR(GetSuccessorEdges(node, device_to_edges));
    // Only one destination device: nothing to replicate.
    if (device_to_edges.size() <= 1) continue;
    ReplicateToEachDevice(graph, node, device_to_edges);
  }
  if (min_skipped != std::numeric_limits<int64_t>::max()) {
    VLOG(1) << "replicate_constants_pass skipped replicating constants with "
               "number of elements in the range "
            << min_skipped << " to " << max_skipped << ".";
  }
  if (VLOG_IS_ON(1)) {
    VLOG(1) << DumpGraphToFile("after_replicate_constants_pass", *graph,
                               options.flib_def);
  }
  return absl::OkStatus();
}
// Register at POST_REWRITE_FOR_EXEC (phase 3), i.e. after device placement,
// since the pass reads assigned device names.
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 3,
                      ReplicateConstantsPass);
} | #include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace tensorflow {
// Device names shared by the tests: one host CPU per task, and two TPUs per
// task, across two tasks of the same job.
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0";
const char kTpu00[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:0";
const char kTpu01[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:1";
const char kTpu10[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:0";
const char kTpu11[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:1";
// Finds the node named `name` in `graph`; CHECK-fails when it is absent.
Node* GetNode(const Graph& graph, const std::string& name) {
  for (Node* candidate : graph.nodes()) {
    if (candidate->name() != name) continue;
    return candidate;
  }
  CHECK(false) << "Unknown node name: " << name;
  return nullptr;
}
// Returns the first in-node of `node`; CHECK-fails when it has none.
Node* GetPredecessor(Node* node) {
  auto in_nodes = node->in_nodes();
  auto first = in_nodes.begin();
  CHECK(first != in_nodes.end())
      << "No predecessor for " << node->name() << "\n";
  return *first;
}
// True iff `dst` appears among `src`'s output nodes.
bool IsEdge(Node* src, Node* dst) {
  bool connected = false;
  for (Node* successor : src->out_nodes()) {
    if (successor == dst) {
      connected = true;
      break;
    }
  }
  return connected;
}
// A scalar constant consumed on two CPU devices is replicated: after the
// pass, every consumer's predecessor lives on the consumer's own device.
TEST(ReplicateConstantsPassTest, TestSmallConstant) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  // Each destination must now read the constant from a same-device node.
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_EQ(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_EQ(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// A constant whose element count exceeds the replication threshold is left
// alone: remote consumers keep their cross-device predecessor.
TEST(ReplicateConstantsPassTest, TestLargeConstant) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const"),
                   {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  // dst0 is co-located with the constant; dst1/dst2 still read remotely.
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// A constant with an outgoing control edge is not replicated, even though it
// is otherwise eligible (small, multi-consumer, CPU-placed).
TEST(ReplicateConstantsPassTest, TestControlOut) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
    Output ctrl_succ =
        ops::Const(scope.WithOpName("ctrl_succ"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "ctrl_succ")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  // The control edge is what disqualifies const0 from replication.
  graph->AddControlEdge(GetNode(*graph, "const0"),
                        GetNode(*graph, "ctrl_succ"));
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// A constant placed on a TPU (non-CPU) device is never replicated.
TEST(ReplicateConstantsPassTest, TestTpuConst) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const0")->set_assigned_device_name(kTpu00);
  GetNode(*graph, "dst0")->set_assigned_device_name(kTpu00);
  GetNode(*graph, "dst1")->set_assigned_device_name(kTpu10);
  GetNode(*graph, "dst2")->set_assigned_device_name(kTpu10);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  // Only dst0 shares a device with the (unmoved) constant.
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// Mixed case: the small constant is replicated per destination device while
// the large constant stays on its original device; edges are rewired so
// consumers read "small" from their local replica but "large" remotely.
TEST(ReplicateConstantsPassTest, TestSmallAndLargeConstants) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output small = ops::Const(scope.WithOpName("small"), 1.0f, TensorShape({}));
    Output large =
        ops::Const(scope.WithOpName("large"),
                   {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
                    10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f});
    ops::Add dst0(scope.WithOpName("dst0"), small, large);
    ops::Add dst1(scope.WithOpName("dst1"), small, large);
    ops::Add dst2(scope.WithOpName("dst2"), small, large);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "small")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "large")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* small0 = GetNode(*graph, "small/replicate/_0");
  Node* small1 = GetNode(*graph, "small/replicate/_1");
  Node* large = GetNode(*graph, "large");
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(small0->assigned_device_name(), kCpu0);
  EXPECT_EQ(small1->assigned_device_name(), kCpu1);
  EXPECT_EQ(large->assigned_device_name(), kCpu0);
  EXPECT_EQ(dst0->assigned_device_name(), kCpu0);
  EXPECT_EQ(dst1->assigned_device_name(), kCpu1);
  // Fixed: this line previously duplicated the dst1 check; dst2's placement
  // (assigned kCpu1 above and never moved by the pass) must be verified too.
  EXPECT_EQ(dst2->assigned_device_name(), kCpu1);
  EXPECT_TRUE(IsEdge(small0, dst0));
  EXPECT_TRUE(IsEdge(large, dst0));
  EXPECT_TRUE(IsEdge(small1, dst1));
  EXPECT_TRUE(IsEdge(large, dst1));
  EXPECT_TRUE(IsEdge(small1, dst2));
  EXPECT_TRUE(IsEdge(large, dst2));
}
// TPU destinations are grouped by their host CPU: one replica is created per
// task's CPU, and both TPUs of a task read from that task's replica.
TEST(ReplicateConstantsPassTest, TestTpuDestinations) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
    ops::Negate dst00(scope.WithOpName("dst00"), const0);
    ops::Negate dst01(scope.WithOpName("dst01"), const0);
    ops::Negate dst10(scope.WithOpName("dst10"), const0);
    ops::Negate dst11(scope.WithOpName("dst11"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst00")->set_assigned_device_name(kTpu00);
  GetNode(*graph, "dst01")->set_assigned_device_name(kTpu01);
  GetNode(*graph, "dst10")->set_assigned_device_name(kTpu10);
  GetNode(*graph, "dst11")->set_assigned_device_name(kTpu11);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* const0 = GetNode(*graph, "const/replicate/_0");
  Node* const1 = GetNode(*graph, "const/replicate/_1");
  Node* dst00 = GetNode(*graph, "dst00");
  Node* dst01 = GetNode(*graph, "dst01");
  Node* dst10 = GetNode(*graph, "dst10");
  Node* dst11 = GetNode(*graph, "dst11");
  // Replicas land on the CPUs of task 0 and task 1 respectively.
  EXPECT_EQ(const0->assigned_device_name(), kCpu0);
  EXPECT_EQ(const1->assigned_device_name(), kCpu1);
  EXPECT_TRUE(IsEdge(const0, dst00));
  EXPECT_TRUE(IsEdge(const0, dst01));
  EXPECT_TRUE(IsEdge(const1, dst10));
  EXPECT_TRUE(IsEdge(const1, dst11));
}
} | void ReplicateToEachDevice(
    Graph* graph, Node* node,
    absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
  // For every destination device, make a uniquely named copy of `node` on
  // that device and rewire that device's edges to the copy.
  for (const auto& pair : device_to_edges) {
    Node* copy = graph->CopyNode(node);
    SetUniqueName(graph, copy);
    const std::string device = pair.first;
    copy->set_assigned_device_name(device);
    for (const Edge* edge : pair.second) {
      graph->AddEdge(copy, edge->src_output(), edge->dst(), edge->dst_input());
    }
    // Carry over control dependencies from the original's predecessors.
    for (Node* src : node->in_nodes()) {
      graph->AddControlEdge(src, copy, true);
    }
  }
  graph->RemoveNode(node);
}
} | TEST(ReplicateConstantsPassTest, TestSmallConstant) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  // Small constant: every consumer now has a same-device predecessor.
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_EQ(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_EQ(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// Over-threshold constants are not replicated.
TEST(ReplicateConstantsPassTest, TestLargeConstant) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const"),
                   {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// Constants with a control successor are left untouched.
TEST(ReplicateConstantsPassTest, TestControlOut) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
    Output ctrl_succ =
        ops::Const(scope.WithOpName("ctrl_succ"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "ctrl_succ")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  graph->AddControlEdge(GetNode(*graph, "const0"),
                        GetNode(*graph, "ctrl_succ"));
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// Constants placed on a TPU device are never replicated.
TEST(ReplicateConstantsPassTest, TestTpuConst) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output const0 =
        ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
    ops::Negate dst0(scope.WithOpName("dst0"), const0);
    ops::Negate dst1(scope.WithOpName("dst1"), const0);
    ops::Negate dst2(scope.WithOpName("dst2"), const0);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "const0")->set_assigned_device_name(kTpu00);
  GetNode(*graph, "dst0")->set_assigned_device_name(kTpu00);
  GetNode(*graph, "dst1")->set_assigned_device_name(kTpu10);
  GetNode(*graph, "dst2")->set_assigned_device_name(kTpu10);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(dst0->assigned_device_name(),
            GetPredecessor(dst0)->assigned_device_name());
  EXPECT_NE(dst1->assigned_device_name(),
            GetPredecessor(dst1)->assigned_device_name());
  EXPECT_NE(dst2->assigned_device_name(),
            GetPredecessor(dst2)->assigned_device_name());
}
// Mixed case: "small" is replicated per device, "large" stays on kCpu0.
TEST(ReplicateConstantsPassTest, TestSmallAndLargeConstants) {
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  {
    Scope scope = Scope::NewRootScope().ExitOnError();
    Output small = ops::Const(scope.WithOpName("small"), 1.0f, TensorShape({}));
    Output large =
        ops::Const(scope.WithOpName("large"),
                   {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
                    10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f});
    ops::Add dst0(scope.WithOpName("dst0"), small, large);
    ops::Add dst1(scope.WithOpName("dst1"), small, large);
    ops::Add dst2(scope.WithOpName("dst2"), small, large);
    TF_CHECK_OK(scope.ToGraph(graph.get()));
  }
  GetNode(*graph, "small")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "large")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
  GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
  GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ReplicateConstantsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  GraphDef actual;
  graph->ToGraphDef(&actual);
  Node* small0 = GetNode(*graph, "small/replicate/_0");
  Node* small1 = GetNode(*graph, "small/replicate/_1");
  Node* large = GetNode(*graph, "large");
  Node* dst0 = GetNode(*graph, "dst0");
  Node* dst1 = GetNode(*graph, "dst1");
  Node* dst2 = GetNode(*graph, "dst2");
  EXPECT_EQ(small0->assigned_device_name(), kCpu0);
  EXPECT_EQ(small1->assigned_device_name(), kCpu1);
  EXPECT_EQ(large->assigned_device_name(), kCpu0);
  EXPECT_EQ(dst0->assigned_device_name(), kCpu0);
  EXPECT_EQ(dst1->assigned_device_name(), kCpu1);
  // NOTE(review): duplicated dst1 assertion; dst2 was likely intended here.
  EXPECT_EQ(dst1->assigned_device_name(), kCpu1);
  EXPECT_TRUE(IsEdge(small0, dst0));
  EXPECT_TRUE(IsEdge(large, dst0));
  EXPECT_TRUE(IsEdge(small1, dst1));
  EXPECT_TRUE(IsEdge(large, dst1));
  EXPECT_TRUE(IsEdge(small1, dst2));
  EXPECT_TRUE(IsEdge(large, dst2));
} |
#include "tsl/profiler/convert/xplane_to_trace_events.h"
#include <stddef.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/trace_utils.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_utils.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
// Fills `device` from an XPlane: sets name/id and creates one Resource per
// XLine. Host-thread lines additionally get sort indices so they keep the
// plane's line order in the viewer.
void BuildDeviceAndResources(uint32 device_id, const XPlaneVisitor& plane,
                             Device* device) {
  device->set_name(std::string(plane.Name()));
  device->set_device_id(device_id);
  // Only the host-threads device is ordered by line ordinal.
  bool sort_by_ordinal = (device_id == kHostThreadsDeviceId);
  int ordinal = 0;
  plane.ForEachLine([&](const XLineVisitor& line) {
    uint32 resource_id = line.DisplayId();
    Resource& resource = (*device->mutable_resources())[resource_id];
    resource.set_resource_id(resource_id);
    resource.set_name(std::string(line.DisplayName()));
    if (sort_by_ordinal) {
      resource.set_sort_index(++ordinal);
    }
  });
}
// Appends all events of `xplane` to `container` under `device_id`, skipping
// the XLA async-op line as well as internal events and internal stats.
void ConvertXPlaneToTraceEvents(uint32 device_id, const XPlaneVisitor& xplane,
                                TraceContainer& container) {
  BuildDeviceAndResources(device_id, xplane,
                          container.MutableDevice(device_id));
  xplane.ForEachLine([device_id, &container](const XLineVisitor& xline) {
    uint32 resource_id = xline.DisplayId();
    if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
      return;
    }
    xline.ForEachEvent(
        [device_id, resource_id, &container](const XEventVisitor& xevent) {
          int64_t event_type =
              xevent.Type().value_or(HostEventType::kUnknownHostEventType);
          if (IsInternalEvent(event_type)) return;
          TraceEvent* event = container.CreateEvent();
          auto& args = *event->mutable_args();
          event->set_device_id(device_id);
          event->set_resource_id(resource_id);
          // Prefer the display name; preserve the raw name as "long_name".
          if (xevent.HasDisplayName()) {
            event->set_name(std::string(xevent.DisplayName()));
            args["long_name"] = std::string(xevent.Name());
          } else {
            event->set_name(std::string(xevent.Name()));
          }
          event->set_timestamp_ps(xevent.TimestampPs());
          event->set_duration_ps(xevent.DurationPs());
          auto for_each_stat = [&](const XStatVisitor& stat) {
            if (stat.ValueCase() == XStat::VALUE_NOT_SET) return;
            if (IsInternalStat(stat.Type())) return;
            // A step-name stat overrides the event name.
            if (stat.Type() == StatType::kStepName) {
              event->set_name(stat.ToString());
            }
            args[std::string(stat.Name())] = stat.ToString();
          };
          // Metadata stats first so per-event stats can override them.
          xevent.Metadata().ForEachStat(for_each_stat);
          xevent.ForEachStat(for_each_stat);
        });
  });
}
}
// Returns the maximum number of events the trace viewer should keep.
// Honors TF_PROFILER_TRACE_VIEWER_MAX_EVENTS when set; falls back to the
// built-in default when the variable is absent or not a valid number.
// (The previous std::stoull call threw std::invalid_argument / out_of_range
// on malformed values, crashing the exporter over a bad env var.)
uint64 GetTraceViewerMaxEvents() {
  constexpr uint64 kMaxEvents = 1000000;
  const char* max_events = std::getenv("TF_PROFILER_TRACE_VIEWER_MAX_EVENTS");
  if (max_events == nullptr) return kMaxEvents;
  char* end = nullptr;
  const unsigned long long parsed = std::strtoull(max_events, &end, 10);
  // strtoull signals "no digits consumed" by leaving end == max_events.
  if (end == max_events) return kMaxEvents;
  return parsed;
}
// Converts an XSpace into a TraceContainer: host plane first, then the first
// non-empty family of device planes (GPU, else TPU, else custom), with the
// total event count capped at the configured maximum.
TraceContainer ConvertXSpaceToTraceContainer(const XSpace& xspace) {
  TraceContainer container;
  const XPlane* host_plane = FindPlaneWithName(xspace, kHostThreadsPlaneName);
  if (host_plane != nullptr) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(host_plane);
    ConvertXPlaneToTraceEvents(kHostThreadsDeviceId, xplane, container);
  }
  std::vector<const XPlane*> device_planes =
      FindPlanesWithPrefix(xspace, kGpuPlanePrefix);
  // Fall back to TPU planes, then custom planes, when no GPU planes exist.
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kTpuPlanePrefix);
  }
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kCustomPlanePrefix);
  }
  for (const XPlane* device_plane : device_planes) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
    // Offset device ids so they never collide with the host device id.
    uint32 device_id = kFirstDeviceId + xplane.Id();
    ConvertXPlaneToTraceEvents(device_id, xplane, container);
  }
  uint64 viewer_max_events = GetTraceViewerMaxEvents();
  container.CapEvents(viewer_max_events);
  return container;
}
void ConvertXSpaceToTraceEventsString(const XSpace& xspace,
std::string* content) {
ConvertXSpaceToTraceContainer(xspace).FlushAndSerializeEvents(content);
}
}
} | #include "tsl/profiler/convert/xplane_to_trace_events.h"
#include <limits>
#include <utility>
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/trace_utils.h"
#include "tsl/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
// Builds a test XSpace with a host plane (two threads, one event each, each
// carrying a tf_op stat) and one GPU plane (one stream with a single kernel
// event carrying a correlation-id stat).
void CreateXSpace(XSpace* space) {
  XPlaneBuilder host_plane(space->add_planes());
  host_plane.SetName(kHostThreadsPlaneName);
  XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
  thread1.SetName("thread1");
  XEventBuilder event1 =
      thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
  event1.SetTimestampNs(150000);
  event1.SetDurationNs(10000);
  event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
                      *host_plane.GetOrCreateStatMetadata("Relu"));
  XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
  thread2.SetName("thread2");
  XEventBuilder event2 =
      thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
  event2.SetTimestampNs(160000);
  event2.SetDurationNs(10000);
  event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
                      *host_plane.GetOrCreateStatMetadata("Conv2D"));
  XPlaneBuilder device_plane(space->add_planes());
  device_plane.SetName(GpuPlaneName(0));
  device_plane.SetId(0);
  XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
  stream1.SetName("gpu stream 1");
  XEventBuilder event3 =
      stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
  event3.SetTimestampNs(180000);
  event3.SetDurationNs(10000);
  event3.AddStatValue(*device_plane.GetOrCreateStatMetadata("correlation id"),
                      55);
}
// End-to-end conversion of the fixture: expects the host device with two
// thread resources, one GPU device with one stream resource, three events.
TEST(ConvertXPlaneToTraceEvents, Convert) {
  XSpace xspace;
  CreateXSpace(&xspace);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  ASSERT_EQ(container.trace().devices_size(), 2);
  EXPECT_EQ(
      container.trace().devices().at(kHostThreadsDeviceId).resources_size(), 2);
  EXPECT_EQ(container.trace().devices().at(kFirstDeviceId).resources_size(), 1);
  EXPECT_EQ(container.UnsortedEvents().size(), 3);
}
// Events on the XLA async-op line must be dropped during conversion.
TEST(ConvertXPlaneToTraceEvents, SkipAsyncOps) {
  XSpace xspace;
  XPlaneBuilder device_plane(xspace.add_planes());
  device_plane.SetName(GpuPlaneName(0));
  XLineBuilder async_ops = device_plane.GetOrCreateLine(10);
  async_ops.SetName(kXlaAsyncOpLineName);
  XEventBuilder event1 =
      async_ops.AddEvent(*device_plane.GetOrCreateEventMetadata("event1"));
  event1.SetTimestampNs(100);
  event1.SetDurationNs(1);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  ASSERT_THAT(container.UnsortedEvents(), ::testing::IsEmpty());
}
}
}
} | TraceContainer ConvertXSpaceToTraceContainer(const XSpace& xspace) {
  TraceContainer container;
  const XPlane* host_plane = FindPlaneWithName(xspace, kHostThreadsPlaneName);
  if (host_plane != nullptr) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(host_plane);
    ConvertXPlaneToTraceEvents(kHostThreadsDeviceId, xplane, container);
  }
  std::vector<const XPlane*> device_planes =
      FindPlanesWithPrefix(xspace, kGpuPlanePrefix);
  // Fall back to TPU planes, then custom planes, when no GPU planes exist.
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kTpuPlanePrefix);
  }
  if (device_planes.empty()) {
    device_planes = FindPlanesWithPrefix(xspace, kCustomPlanePrefix);
  }
  for (const XPlane* device_plane : device_planes) {
    XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
    // Offset device ids so they never collide with the host device id.
    uint32 device_id = kFirstDeviceId + xplane.Id();
    ConvertXPlaneToTraceEvents(device_id, xplane, container);
  }
  uint64 viewer_max_events = GetTraceViewerMaxEvents();
  container.CapEvents(viewer_max_events);
  return container;
}
} | TEST(ConvertXPlaneToTraceEvents, Convert) {
  XSpace xspace;
  CreateXSpace(&xspace);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  // Host device plus one GPU device; three total events from the fixture.
  ASSERT_EQ(container.trace().devices_size(), 2);
  EXPECT_EQ(
      container.trace().devices().at(kHostThreadsDeviceId).resources_size(), 2);
  EXPECT_EQ(container.trace().devices().at(kFirstDeviceId).resources_size(), 1);
  EXPECT_EQ(container.UnsortedEvents().size(), 3);
}
// Events on the XLA async-op line must be dropped during conversion.
TEST(ConvertXPlaneToTraceEvents, SkipAsyncOps) {
  XSpace xspace;
  XPlaneBuilder device_plane(xspace.add_planes());
  device_plane.SetName(GpuPlaneName(0));
  XLineBuilder async_ops = device_plane.GetOrCreateLine(10);
  async_ops.SetName(kXlaAsyncOpLineName);
  XEventBuilder event1 =
      async_ops.AddEvent(*device_plane.GetOrCreateEventMetadata("event1"));
  event1.SetTimestampNs(100);
  event1.SetDurationNs(1);
  TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
  ASSERT_THAT(container.UnsortedEvents(), ::testing::IsEmpty());
}
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/errors.h"
namespace mlir {
namespace mhlo {
namespace {
#define GEN_PASS_DEF_VERIFYTFXLALEGALIZATION
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
// Streamz counters keyed by op name: legalization failures, ops with
// non-static results, and non-static ops that were deliberately skipped.
auto* mlir_failed_legalization_op_count =
    tensorflow::monitoring::Counter<1>::New(
        "/tensorflow/core/tf2xla/"
        "mlir_second_phase_failed_legalization_op_count",
        "Counts which op fails to legalize", "op_name");
auto* mlir_non_static_op_count = tensorflow::monitoring::Counter<1>::New(
    "/tensorflow/core/tf2xla/"
    "mlir_second_phase_non_static_op_count",
    "Counts which ops do not have static results", "op_name");
auto* mlir_non_static_op_skip_count = tensorflow::monitoring::Counter<1>::New(
    "/tensorflow/core/tf2xla/"
    "mlir_second_phase_non_static_op_skip_count",
    "Counts skipped ops which do not have static results", "op_name");
// Error text emitted for ops that must be compile-time constant but are not.
static const char* kMustBeConstantError =
    "must have compile-time constant inputs and outputs.\n\n"
    "XLA compilation requires that operator arguments that represent shapes or "
    "dimensions be evaluated to concrete values at compile time. This error "
    "means that a shape or dimension argument could not be evaluated at "
    "compile time, usually because the value of the argument depends on a "
    "parameter to the computation, on a variable, or on a stateful operation "
    "such as a random number generator.";
// Ops tolerated with non-static results: counted as skipped, not errored.
// Intentionally leaked (never freed) like the counters above.
static const DenseSet<mlir::TypeID>* operations_to_skip =
    new DenseSet<mlir::TypeID>{mlir::TypeID::get<mhlo::EinsumOp>()};
// Pass verifying that every op is either legal for the default TF->XLA
// conversion target or is an MHLO op whose results are static or bounded.
class VerifyTFXLALegalization
    : public impl::VerifyTFXLALegalizationBase<VerifyTFXLALegalization> {
 public:
  // `legalize_chlo` selects whether CHLO ops count as legal for the target.
  explicit VerifyTFXLALegalization(bool legalize_chlo) {
    legalize_chlo_ = legalize_chlo;
  }
  void runOnOperation() override;
};
// Bumps `counter`'s cell for `op`'s registered name by one.
static void IncrementCounterFor(tensorflow::monitoring::Counter<1>* counter,
                                Operation* op) {
  const std::string op_name = op->getName().getStringRef().str();
  counter->GetCell(op_name)->IncrementBy(1);
}
// A ranked type is "bounded" when its encoding carries a non-empty bounds
// list via the MHLO type-extensions attribute.
bool HasBounds(RankedTensorType type) {
  auto extensions = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
      type.getEncoding());
  if (!extensions) return false;
  return !extensions.getBounds().empty();
}
// True when `val`'s type is acceptable for compilation: anything non-tensor,
// or a ranked tensor whose shape is fully static or carries bounds.
// Unranked tensors are always rejected.
bool HasStaticShapeOrBounded(Value val) {
  auto type = val.getType();
  if (mlir::isa<UnrankedTensorType>(type)) {
    return false;
  }
  // Single dyn_cast replaces the previous isa<> + dyn_cast<> pair, which
  // performed the same type check twice.
  if (auto ranked_tensor = mlir::dyn_cast<RankedTensorType>(type)) {
    return ranked_tensor.hasStaticShape() || HasBounds(ranked_tensor);
  }
  return true;
}
// Reports a non-static op. Ops on the skip list are tolerated: their skip
// counter is bumped and the function reports success (returns true).
// Otherwise a must-be-constant diagnostic is emitted and false is returned.
bool EmitMustBeConstantError(Operation* op) {
  const bool skippable =
      operations_to_skip->contains(op->getRegisteredInfo()->getTypeID());
  if (skippable) {
    IncrementCounterFor(mlir_non_static_op_skip_count, op);
    return true;
  }
  const std::string op_name = op->getName().getStringRef().str();
  emitError(op->getLoc())
      << absl::StrCat("Node `", op_name, "` ", kMustBeConstantError);
  return false;
}
// An op is "static" when every result has a static or bounded shape. On the
// first offending result, delegate to the error emitter — which may decide
// the op is skippable and still report success.
bool IsStaticOperation(Operation* op) {
  for (Value result : op->getResults()) {
    if (HasStaticShapeOrBounded(result)) continue;
    return EmitMustBeConstantError(op);
  }
  return true;
}
// Non-MHLO ops pass vacuously; the static-result check only applies to MHLO.
bool IsMhloAndStatic(Operation* op) {
  const bool is_mhlo = llvm::isa<mlir::mhlo::MhloDialect>(op->getDialect());
  return !is_mhlo || IsStaticOperation(op);
}
// True when `op` is legal for the default conversion target; otherwise emits
// a diagnostic naming the op and returns false.
bool IsDefaultConversionLegal(
    Operation* op, const ConversionTarget& default_conversion_target) {
  if (default_conversion_target.isLegal(op)) return true;
  emitError(op->getLoc()) << "Could not legalize op: " << op->getName();
  return false;
}
// Walks the function and verifies every op is (a) static/bounded if it is
// an mhlo op and (b) legal under the default TF->XLA conversion target.
// Each failure mode is recorded in its own streamz counter; any failure
// fails the pass.
void VerifyTFXLALegalization::runOnOperation() {
  Operation* func_op = getOperation();
  ConversionTarget default_conversion_target =
      GetDefaultLegalConversionTargets(getContext(), legalize_chlo_);
  bool has_invalid_ops = false;
  func_op->walk([&](Operation* op) {
    if (!IsMhloAndStatic(op)) {
      has_invalid_ops = true;
      IncrementCounterFor(mlir_non_static_op_count, op);
      // A non-static mhlo op aborts the walk immediately.
      return WalkResult::interrupt();
    }
    if (!IsDefaultConversionLegal(op, default_conversion_target)) {
      has_invalid_ops = true;
      IncrementCounterFor(mlir_failed_legalization_op_count, op);
      // Note: no interrupt here, so every illegal op gets counted.
    }
    return WalkResult::advance();
  });
  if (has_invalid_ops) signalPassFailure();
}
}
// Factory: hands ownership of a freshly constructed verification pass to
// the caller.
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateVerifyTFXLALegalizationPass(bool legalize_chlo) {
  auto pass = std::make_unique<VerifyTFXLALegalization>(legalize_chlo);
  return pass;
}
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
static constexpr char kFailedLegalizationStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_failed_legalization_op_count";
static constexpr char kNonStaticOpStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_count";
static constexpr char kNonStaticOpSkipStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_skip_count";
// Fixture that parses an MLIR module from a string and runs the
// VerifyTFXLALegalization pass (legalize_chlo=false) nested on each func.
class VerifyTfxlaLegalizationTest : public ::testing::Test {
 protected:
  // Parses `module_string` into module_ and builds the pass manager.
  void CreateModule(const char* module_string) {
    TF_ASSERT_OK_AND_ASSIGN(module_,
                            GetMlirModuleFromString(module_string, &context_));
    pm_ = std::make_unique<mlir::PassManager>(&context_);
    pm_->addNestedPass<mlir::func::FuncOp>(
        mlir::mhlo::CreateVerifyTFXLALegalizationPass(false));
  }
  // Runs the configured passes; failure means verification rejected the IR.
  mlir::LogicalResult Run() { return pm_->run(module_.get()); }
 private:
  MLIRContext context_;
  OwningOpRef<ModuleOp> module_;
  std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyTfxlaLegalizationTest, RecordsStreamzFailedVerification) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, ErrorsNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32>
%4 = mhlo.multiply %3, %3 : tensor<?xi32>
return %4 : tensor<?xi32>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(legal_error.Delta("mhlo.dynamic_iota"), 0);
EXPECT_EQ(static_error.Delta("mhlo.dynamic_iota"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsSpecificNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main(%a : tensor<5x14x1xf32>, %b : tensor<1x14x32xf32>) -> tensor<?x?x?xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%c = "mhlo.einsum"(%a, %b) {einsum_config = "bji,bjk->bik"} : (tensor<5x14x1xf32>, tensor<1x14x32xf32>) -> tensor<?x?x?xf32>
return %c : tensor<?x?x?xf32>
}
})";
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CellReader<int64_t> skipped(kNonStaticOpSkipStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(static_error.Delta("mhlo.einsum"), 0);
EXPECT_EQ(skipped.Delta("mhlo.einsum"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsNonStaticInputsWithBounds) {
static constexpr char kNonStaticWithBoundsSuccess[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
%4 = mhlo.multiply %3, %3 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
return %4 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticWithBoundsSuccess);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(legal_error.Delta("mhlo.multiply"), 0);
EXPECT_EQ(static_error.Delta("mhlo.multiply"), 0);
}
TEST_F(VerifyTfxlaLegalizationTest, RecordsMultipleFailures) {
static constexpr char kMultipleFailures[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tf.AlsoBad"() {value = dense<10> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMultipleFailures);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
EXPECT_EQ(error.Delta("tf.AlsoBad"), 1);
}
}
} | bool EmitMustBeConstantError(Operation* op) {
if (operations_to_skip->contains(op->getRegisteredInfo()->getTypeID())) {
IncrementCounterFor(mlir_non_static_op_skip_count, op);
return true;
}
emitError(op->getLoc()) << absl::StrCat(
"Node `", op->getName().getStringRef().str(), "` ", kMustBeConstantError);
return false;
} | TEST_F(VerifyTfxlaLegalizationTest, ErrorsNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32>
%4 = mhlo.multiply %3, %3 : tensor<?xi32>
return %4 : tensor<?xi32>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(legal_error.Delta("mhlo.dynamic_iota"), 0);
EXPECT_EQ(static_error.Delta("mhlo.dynamic_iota"), 1);
} |
#include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include <string>
#include <utility>
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace {
// Moves the accumulated string out of `string_buffer`, leaving the buffer
// reset. Buffered contents are released directly; unbuffered views must be
// copied before the buffer is reset.
std::string ExtractString(HpackDecoderStringBuffer* string_buffer) {
  if (!string_buffer->IsBuffered()) {
    std::string copy(string_buffer->str());
    string_buffer->Reset();
    return copy;
  }
  return string_buffer->ReleaseString();
}
}
// Initializes decoder state with the HTTP/2 default dynamic table size as
// both the acknowledged setting and the low-water mark; no size update is
// required before the first header block.
HpackDecoderState::HpackDecoderState(HpackDecoderListener* listener)
    : listener_(listener),
      final_header_table_size_(Http2SettingsInfo::DefaultHeaderTableSize()),
      lowest_header_table_size_(final_header_table_size_),
      require_dynamic_table_size_update_(false),
      allow_dynamic_table_size_update_(true),
      saw_dynamic_table_size_update_(false),
      error_(HpackDecodingError::kOk) {
  QUICHE_CHECK(listener_);  // A listener is mandatory.
}
HpackDecoderState::~HpackDecoderState() = default;
// Records a new SETTINGS_HEADER_TABLE_SIZE value. The smallest value seen
// since the last header block is tracked separately: RFC 7541 requires the
// first size update of the next block to be no larger than that low-water
// mark.
void HpackDecoderState::ApplyHeaderTableSizeSetting(
    uint32_t header_table_size) {
  QUICHE_DVLOG(2) << "HpackDecoderState::ApplyHeaderTableSizeSetting("
                  << header_table_size << ")";
  QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
  if (lowest_header_table_size_ > header_table_size) {
    lowest_header_table_size_ = header_table_size;
  }
  final_header_table_size_ = header_table_size;
  QUICHE_DVLOG(2) << "low water mark: " << lowest_header_table_size_;
  QUICHE_DVLOG(2) << "final limit: " << final_header_table_size_;
}
// Called at the start of an HPACK header block. Resets per-block state and
// decides whether RFC 7541 Section 4.2 requires this block to begin with a
// dynamic table size update (the acknowledged setting shrank below either
// the current table size or its limit since the last block).
void HpackDecoderState::OnHeaderBlockStart() {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderBlockStart";
  // The instance must not be reused after an error: encoder and decoder
  // compression state can no longer be assumed to be in sync.
  QUICHE_DCHECK(error_ == HpackDecodingError::kOk)
      << HpackDecodingErrorToString(error_);
  QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
  allow_dynamic_table_size_update_ = true;
  saw_dynamic_table_size_update_ = false;
  require_dynamic_table_size_update_ =
      (lowest_header_table_size_ <
           decoder_tables_.current_header_table_size() ||
       final_header_table_size_ < decoder_tables_.header_table_size_limit());
  QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderListStart "
                  << "require_dynamic_table_size_update_="
                  << require_dynamic_table_size_update_;
  listener_->OnHeaderListStart();
}
// Handles an indexed header field: looks up `index` in the combined
// static/dynamic tables and reports the entry to the listener, or reports
// an error for an invalid index.
void HpackDecoderState::OnIndexedHeader(size_t index) {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnIndexedHeader: " << index;
  if (error_ != HpackDecodingError::kOk) {
    return;  // Already failed; ignore further events.
  }
  if (require_dynamic_table_size_update_) {
    // RFC 7541: a required size update must precede any header field.
    ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
    return;
  }
  allow_dynamic_table_size_update_ = false;
  const HpackStringPair* entry = decoder_tables_.Lookup(index);
  if (entry == nullptr) {
    ReportError(HpackDecodingError::kInvalidIndex);
    return;
  }
  listener_->OnHeader(entry->name, entry->value);
}
// Handles a literal header field whose name is a table index and whose
// value is a literal string. On success the header is reported to the
// listener and, for the incremental-indexing form, inserted into the
// dynamic table.
void HpackDecoderState::OnNameIndexAndLiteralValue(
    HpackEntryType entry_type, size_t name_index,
    HpackDecoderStringBuffer* value_buffer) {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnNameIndexAndLiteralValue "
                  << entry_type << ", " << name_index << ", "
                  << value_buffer->str();
  if (error_ != HpackDecodingError::kOk) {
    return;
  }
  if (require_dynamic_table_size_update_) {
    // A mandated size update (RFC 7541 Section 4.2) never arrived.
    ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
    return;
  }
  // Size updates are only permitted at the start of the block.
  allow_dynamic_table_size_update_ = false;
  const HpackStringPair* entry = decoder_tables_.Lookup(name_index);
  if (entry != nullptr) {
    std::string value(ExtractString(value_buffer));
    // Report first, then move `value` into the table; the listener call
    // must happen before the move.
    listener_->OnHeader(entry->name, value);
    if (entry_type == HpackEntryType::kIndexedLiteralHeader) {
      decoder_tables_.Insert(entry->name, std::move(value));
    }
  } else {
    ReportError(HpackDecodingError::kInvalidNameIndex);
  }
}
// Handles a literal header field where both name and value are literal
// strings. Reports the header to the listener; the incremental-indexing
// form also inserts it into the dynamic table.
void HpackDecoderState::OnLiteralNameAndValue(
    HpackEntryType entry_type, HpackDecoderStringBuffer* name_buffer,
    HpackDecoderStringBuffer* value_buffer) {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnLiteralNameAndValue " << entry_type
                  << ", " << name_buffer->str() << ", " << value_buffer->str();
  if (error_ != HpackDecodingError::kOk) {
    return;
  }
  if (require_dynamic_table_size_update_) {
    ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
    return;
  }
  // Size updates are only permitted at the start of the block.
  allow_dynamic_table_size_update_ = false;
  std::string name(ExtractString(name_buffer));
  std::string value(ExtractString(value_buffer));
  // Report before moving the strings into the dynamic table.
  listener_->OnHeader(name, value);
  if (entry_type == HpackEntryType::kIndexedLiteralHeader) {
    decoder_tables_.Insert(std::move(name), std::move(value));
  }
}
// Handles a dynamic table size update (RFC 7541 Section 6.3). Updates are
// only allowed at the start of a header block, at most two per block. A
// *required* first update (after the setting shrank) must not exceed the
// low-water mark; an optional update must not exceed the acknowledged
// SETTINGS_HEADER_TABLE_SIZE value.
void HpackDecoderState::OnDynamicTableSizeUpdate(size_t size_limit) {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnDynamicTableSizeUpdate "
                  << size_limit << ", required="
                  << (require_dynamic_table_size_update_ ? "true" : "false")
                  << ", allowed="
                  << (allow_dynamic_table_size_update_ ? "true" : "false");
  if (error_ != HpackDecodingError::kOk) {
    return;
  }
  QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
  if (!allow_dynamic_table_size_update_) {
    // Third update in one block, or an update after a header field.
    ReportError(HpackDecodingError::kDynamicTableSizeUpdateNotAllowed);
    return;
  }
  if (require_dynamic_table_size_update_) {
    // The first update must ack the lowest setting seen between blocks.
    if (size_limit > lowest_header_table_size_) {
      ReportError(HpackDecodingError::
                      kInitialDynamicTableSizeUpdateIsAboveLowWaterMark);
      return;
    }
    require_dynamic_table_size_update_ = false;
  } else if (size_limit > final_header_table_size_) {
    ReportError(
        HpackDecodingError::kDynamicTableSizeUpdateIsAboveAcknowledgedSetting);
    return;
  }
  decoder_tables_.DynamicTableSizeUpdate(size_limit);
  if (saw_dynamic_table_size_update_) {
    // Second update in this block; no further updates are permitted.
    allow_dynamic_table_size_update_ = false;
  } else {
    saw_dynamic_table_size_update_ = true;
  }
  // The pending setting has now been acknowledged; reset the low-water mark.
  lowest_header_table_size_ = final_header_table_size_;
}
// Forwards a decode error from the entry decoder; only the first error is
// recorded, later ones are dropped.
void HpackDecoderState::OnHpackDecodeError(HpackDecodingError error) {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnHpackDecodeError "
                  << HpackDecodingErrorToString(error);
  if (error_ != HpackDecodingError::kOk) {
    return;
  }
  ReportError(error);
}
// Called at the end of a header block. Reports the end of the header list
// unless a required dynamic table size update never arrived, which is an
// error in its own right.
void HpackDecoderState::OnHeaderBlockEnd() {
  QUICHE_DVLOG(2) << "HpackDecoderState::OnHeaderBlockEnd";
  if (error_ != HpackDecodingError::kOk) {
    return;
  }
  if (!require_dynamic_table_size_update_) {
    listener_->OnHeaderListEnd();
    return;
  }
  // An empty header block still needed its mandatory size update.
  ReportError(HpackDecodingError::kMissingDynamicTableSizeUpdate);
}
// Records the first error and notifies the listener; subsequent errors are
// logged but otherwise ignored.
void HpackDecoderState::ReportError(HpackDecodingError error) {
  const bool is_new = (error_ == HpackDecodingError::kOk);
  QUICHE_DVLOG(2) << "HpackDecoderState::ReportError is new="
                  << (is_new ? "true" : "false")
                  << ", error: " << HpackDecodingErrorToString(error);
  if (!is_new) {
    return;
  }
  listener_->OnHeaderErrorDetected(HpackDecodingErrorToString(error));
  error_ = error;
}
} | #include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionResult;
using ::testing::AssertionSuccess;
using ::testing::Eq;
using ::testing::Mock;
using ::testing::StrictMock;
namespace http2 {
namespace test {
// Test peer granting the tests access to HpackDecoderState's private
// decoder tables.
class HpackDecoderStatePeer {
 public:
  static HpackDecoderTables* GetDecoderTables(HpackDecoderState* state) {
    return &state->decoder_tables_;
  }
};
namespace {
// GoogleMock listener used to verify the callbacks emitted by the decoder
// state machine.
class MockHpackDecoderListener : public HpackDecoderListener {
 public:
  MOCK_METHOD(void, OnHeaderListStart, (), (override));
  MOCK_METHOD(void, OnHeader, (absl::string_view name, absl::string_view value),
              (override));
  MOCK_METHOD(void, OnHeaderListEnd, (), (override));
  MOCK_METHOD(void, OnHeaderErrorDetected, (absl::string_view error_message),
              (override));
};
enum StringBacking { UNBUFFERED, BUFFERED };
class HpackDecoderStateTest : public quiche::test::QuicheTest {
protected:
HpackDecoderStateTest() : decoder_state_(&listener_) {}
HpackDecoderTables* GetDecoderTables() {
return HpackDecoderStatePeer::GetDecoderTables(&decoder_state_);
}
const HpackStringPair* Lookup(size_t index) {
return GetDecoderTables()->Lookup(index);
}
size_t current_header_table_size() {
return GetDecoderTables()->current_header_table_size();
}
size_t header_table_size_limit() {
return GetDecoderTables()->header_table_size_limit();
}
void set_header_table_size_limit(size_t size) {
GetDecoderTables()->DynamicTableSizeUpdate(size);
}
void SetStringBuffer(absl::string_view s, StringBacking backing,
HpackDecoderStringBuffer* string_buffer) {
string_buffer->OnStart(false, s.size());
EXPECT_TRUE(string_buffer->OnData(s.data(), s.size()));
EXPECT_TRUE(string_buffer->OnEnd());
if (backing == BUFFERED) {
string_buffer->BufferStringIfUnbuffered();
}
}
void SetName(absl::string_view s, StringBacking backing) {
SetStringBuffer(s, backing, &name_buffer_);
}
void SetValue(absl::string_view s, StringBacking backing) {
SetStringBuffer(s, backing, &value_buffer_);
}
void SendStartAndVerifyCallback() {
EXPECT_CALL(listener_, OnHeaderListStart());
decoder_state_.OnHeaderBlockStart();
Mock::VerifyAndClearExpectations(&listener_);
}
void SendSizeUpdate(size_t size) {
decoder_state_.OnDynamicTableSizeUpdate(size);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendIndexAndVerifyCallback(size_t index,
HpackEntryType ,
absl::string_view expected_name,
absl::string_view expected_value) {
EXPECT_CALL(listener_, OnHeader(Eq(expected_name), Eq(expected_value)));
decoder_state_.OnIndexedHeader(index);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendValueAndVerifyCallback(size_t name_index, HpackEntryType entry_type,
absl::string_view name,
absl::string_view value,
StringBacking value_backing) {
SetValue(value, value_backing);
EXPECT_CALL(listener_, OnHeader(Eq(name), Eq(value)));
decoder_state_.OnNameIndexAndLiteralValue(entry_type, name_index,
&value_buffer_);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendNameAndValueAndVerifyCallback(HpackEntryType entry_type,
absl::string_view name,
StringBacking name_backing,
absl::string_view value,
StringBacking value_backing) {
SetName(name, name_backing);
SetValue(value, value_backing);
EXPECT_CALL(listener_, OnHeader(Eq(name), Eq(value)));
decoder_state_.OnLiteralNameAndValue(entry_type, &name_buffer_,
&value_buffer_);
Mock::VerifyAndClearExpectations(&listener_);
}
void SendEndAndVerifyCallback() {
EXPECT_CALL(listener_, OnHeaderListEnd());
decoder_state_.OnHeaderBlockEnd();
Mock::VerifyAndClearExpectations(&listener_);
}
AssertionResult VerifyEntry(size_t dynamic_index, absl::string_view name,
absl::string_view value) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_NE(entry, nullptr);
HTTP2_VERIFY_EQ(entry->name, name);
HTTP2_VERIFY_EQ(entry->value, value);
return AssertionSuccess();
}
AssertionResult VerifyNoEntry(size_t dynamic_index) {
const HpackStringPair* entry =
Lookup(dynamic_index + kFirstDynamicTableIndex - 1);
HTTP2_VERIFY_EQ(entry, nullptr);
return AssertionSuccess();
}
AssertionResult VerifyDynamicTableContents(
const std::vector<std::pair<absl::string_view, absl::string_view>>&
entries) {
size_t index = 1;
for (const auto& entry : entries) {
HTTP2_VERIFY_SUCCESS(VerifyEntry(index, entry.first, entry.second));
++index;
}
HTTP2_VERIFY_SUCCESS(VerifyNoEntry(index));
return AssertionSuccess();
}
StrictMock<MockHpackDecoderListener> listener_;
HpackDecoderState decoder_state_;
HpackDecoderStringBuffer name_buffer_, value_buffer_;
};
// RFC 7541, Appendix C.3: three sequential requests decoded without
// Huffman coding, verifying headers, dynamic table contents, and table
// size after each block.
TEST_F(HpackDecoderStateTest, C3_RequestExamples) {
  SendStartAndVerifyCallback();
  SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
                             "GET");
  SendIndexAndVerifyCallback(6, HpackEntryType::kIndexedHeader, ":scheme",
                             "http");
  SendIndexAndVerifyCallback(4, HpackEntryType::kIndexedHeader, ":path", "/");
  SendValueAndVerifyCallback(1, HpackEntryType::kIndexedLiteralHeader,
                             ":authority", "www.example.com", UNBUFFERED);
  SendEndAndVerifyCallback();
  ASSERT_TRUE(VerifyDynamicTableContents({{":authority", "www.example.com"}}));
  ASSERT_EQ(57u, current_header_table_size());
  SendStartAndVerifyCallback();
  SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
                             "GET");
  SendIndexAndVerifyCallback(6, HpackEntryType::kIndexedHeader, ":scheme",
                             "http");
  SendIndexAndVerifyCallback(4, HpackEntryType::kIndexedHeader, ":path", "/");
  // 62 is the first dynamic table index (the entry added above).
  SendIndexAndVerifyCallback(62, HpackEntryType::kIndexedHeader, ":authority",
                             "www.example.com");
  SendValueAndVerifyCallback(24, HpackEntryType::kIndexedLiteralHeader,
                             "cache-control", "no-cache", UNBUFFERED);
  SendEndAndVerifyCallback();
  ASSERT_TRUE(VerifyDynamicTableContents(
      {{"cache-control", "no-cache"}, {":authority", "www.example.com"}}));
  ASSERT_EQ(110u, current_header_table_size());
  SendStartAndVerifyCallback();
  SendIndexAndVerifyCallback(2, HpackEntryType::kIndexedHeader, ":method",
                             "GET");
  SendIndexAndVerifyCallback(7, HpackEntryType::kIndexedHeader, ":scheme",
                             "https");
  SendIndexAndVerifyCallback(5, HpackEntryType::kIndexedHeader, ":path",
                             "/index.html");
  SendIndexAndVerifyCallback(63, HpackEntryType::kIndexedHeader, ":authority",
                             "www.example.com");
  SendNameAndValueAndVerifyCallback(HpackEntryType::kIndexedLiteralHeader,
                                    "custom-key", UNBUFFERED, "custom-value",
                                    UNBUFFERED);
  SendEndAndVerifyCallback();
  ASSERT_TRUE(VerifyDynamicTableContents({{"custom-key", "custom-value"},
                                          {"cache-control", "no-cache"},
                                          {":authority", "www.example.com"}}));
  ASSERT_EQ(164u, current_header_table_size());
}
// RFC 7541, Appendix C.5: three sequential responses decoded without
// Huffman coding, with the dynamic table capped at 256 bytes so that
// evictions occur between blocks.
// Fix: the "https:" string literals were left unterminated (the
// "//www.example.com" tails had been stripped as if they were comments);
// restored to the RFC's "https://www.example.com" value, including the
// dropped trailing UNBUFFERED argument of the first call.
TEST_F(HpackDecoderStateTest, C5_ResponseExamples) {
  set_header_table_size_limit(256);
  SendStartAndVerifyCallback();
  SendValueAndVerifyCallback(8, HpackEntryType::kIndexedLiteralHeader,
                             ":status", "302", BUFFERED);
  SendValueAndVerifyCallback(24, HpackEntryType::kIndexedLiteralHeader,
                             "cache-control", "private", UNBUFFERED);
  SendValueAndVerifyCallback(33, HpackEntryType::kIndexedLiteralHeader, "date",
                             "Mon, 21 Oct 2013 20:13:21 GMT", UNBUFFERED);
  SendValueAndVerifyCallback(46, HpackEntryType::kIndexedLiteralHeader,
                             "location", "https://www.example.com",
                             UNBUFFERED);
  SendEndAndVerifyCallback();
  ASSERT_TRUE(
      VerifyDynamicTableContents({{"location", "https://www.example.com"},
                                  {"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
                                  {"cache-control", "private"},
                                  {":status", "302"}}));
  ASSERT_EQ(222u, current_header_table_size());
  SendStartAndVerifyCallback();
  // ":status: 307" evicts ":status: 302" from the full 222-byte table.
  SendValueAndVerifyCallback(8, HpackEntryType::kIndexedLiteralHeader,
                             ":status", "307", BUFFERED);
  SendIndexAndVerifyCallback(65, HpackEntryType::kIndexedHeader,
                             "cache-control", "private");
  SendIndexAndVerifyCallback(64, HpackEntryType::kIndexedHeader, "date",
                             "Mon, 21 Oct 2013 20:13:21 GMT");
  SendIndexAndVerifyCallback(63, HpackEntryType::kIndexedHeader, "location",
                             "https://www.example.com");
  SendEndAndVerifyCallback();
  ASSERT_TRUE(
      VerifyDynamicTableContents({{":status", "307"},
                                  {"location", "https://www.example.com"},
                                  {"date", "Mon, 21 Oct 2013 20:13:21 GMT"},
                                  {"cache-control", "private"}}));
  ASSERT_EQ(222u, current_header_table_size());
  SendStartAndVerifyCallback();
  SendIndexAndVerifyCallback(8, HpackEntryType::kIndexedHeader, ":status",
                             "200");
  SendIndexAndVerifyCallback(65, HpackEntryType::kIndexedHeader,
                             "cache-control", "private");
  SendValueAndVerifyCallback(33, HpackEntryType::kIndexedLiteralHeader, "date",
                             "Mon, 21 Oct 2013 20:13:22 GMT", BUFFERED);
  SendIndexAndVerifyCallback(64, HpackEntryType::kIndexedHeader, "location",
                             "https://www.example.com");
  SendValueAndVerifyCallback(26, HpackEntryType::kIndexedLiteralHeader,
                             "content-encoding", "gzip", UNBUFFERED);
  SendValueAndVerifyCallback(
      55, HpackEntryType::kIndexedLiteralHeader, "set-cookie",
      "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", BUFFERED);
  SendEndAndVerifyCallback();
  // The large set-cookie entry evicts everything but the three newest.
  ASSERT_TRUE(VerifyDynamicTableContents(
      {{"set-cookie",
        "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"},
       {"content-encoding", "gzip"},
       {"date", "Mon, 21 Oct 2013 20:13:22 GMT"}}));
  ASSERT_EQ(215u, current_header_table_size());
}
// Optional (unrequired) dynamic table size updates: up to two are allowed
// at the start of a header block; a third must be rejected.
TEST_F(HpackDecoderStateTest, OptionalTableSizeChanges) {
  SendStartAndVerifyCallback();
  EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
            header_table_size_limit());
  SendSizeUpdate(1024);
  EXPECT_EQ(1024u, header_table_size_limit());
  SendSizeUpdate(0);
  EXPECT_EQ(0u, header_table_size_limit());
  // Third update in the same block is an error.
  EXPECT_CALL(listener_, OnHeaderErrorDetected(
                             Eq("Dynamic table size update not allowed")));
  SendSizeUpdate(0);
}
// After the table size setting is lowered, the next header block must
// start with a size update; a header field arriving first is an error,
// and all subsequent events are then ignored.
TEST_F(HpackDecoderStateTest, RequiredTableSizeChangeBeforeHeader) {
  EXPECT_EQ(4096u, decoder_state_.GetCurrentHeaderTableSizeSetting());
  decoder_state_.ApplyHeaderTableSizeSetting(1024);
  decoder_state_.ApplyHeaderTableSizeSetting(2048);
  EXPECT_EQ(2048u, decoder_state_.GetCurrentHeaderTableSizeSetting());
  SendStartAndVerifyCallback();
  EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
            header_table_size_limit());
  // First update must be <= the low-water mark (1024); second may go up to
  // the final setting (2048).
  SendSizeUpdate(1024);
  EXPECT_EQ(1024u, header_table_size_limit());
  SendSizeUpdate(1500);
  EXPECT_EQ(1500u, header_table_size_limit());
  SendEndAndVerifyCallback();
  decoder_state_.ApplyHeaderTableSizeSetting(1024);
  EXPECT_EQ(1024u, decoder_state_.GetCurrentHeaderTableSizeSetting());
  SendStartAndVerifyCallback();
  // A header before the required size update triggers exactly one error;
  // everything after it is silently ignored.
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(Eq("Missing dynamic table size update")));
  decoder_state_.OnIndexedHeader(1);
  decoder_state_.OnIndexedHeader(1);
  decoder_state_.OnDynamicTableSizeUpdate(1);
  SetValue("value", UNBUFFERED);
  decoder_state_.OnNameIndexAndLiteralValue(
      HpackEntryType::kIndexedLiteralHeader, 4, &value_buffer_);
  SetName("name", UNBUFFERED);
  decoder_state_.OnLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
                                       &name_buffer_, &value_buffer_);
  decoder_state_.OnHeaderBlockEnd();
  decoder_state_.OnHpackDecodeError(HpackDecodingError::kIndexVarintError);
}
// When the setting shrank to 1024, the first size update of the next block
// must not exceed that low-water mark.
TEST_F(HpackDecoderStateTest, InvalidRequiredSizeUpdate) {
  decoder_state_.ApplyHeaderTableSizeSetting(1024);
  SendStartAndVerifyCallback();
  EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
            header_table_size_limit());
  EXPECT_CALL(
      listener_,
      OnHeaderErrorDetected(
          Eq("Initial dynamic table size update is above low water mark")));
  SendSizeUpdate(2048);
}
// A required size update that never arrives is reported at block end, even
// for an otherwise empty header block.
TEST_F(HpackDecoderStateTest, RequiredTableSizeChangeBeforeEnd) {
  decoder_state_.ApplyHeaderTableSizeSetting(1024);
  SendStartAndVerifyCallback();
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(Eq("Missing dynamic table size update")));
  decoder_state_.OnHeaderBlockEnd();
}
// An optional size update may not exceed the acknowledged
// SETTINGS_HEADER_TABLE_SIZE value.
TEST_F(HpackDecoderStateTest, InvalidOptionalSizeUpdate) {
  SendStartAndVerifyCallback();
  EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
            header_table_size_limit());
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(Eq(
                  "Dynamic table size update is above acknowledged setting")));
  SendSizeUpdate(Http2SettingsInfo::DefaultHeaderTableSize() + 1);
}
// Index 0 is not assigned in HPACK and must be rejected.
TEST_F(HpackDecoderStateTest, InvalidStaticIndex) {
  SendStartAndVerifyCallback();
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(
                  Eq("Invalid index in indexed header field representation")));
  decoder_state_.OnIndexedHeader(0);
}
// The first dynamic-table index is invalid while the dynamic table is
// still empty.
TEST_F(HpackDecoderStateTest, InvalidDynamicIndex) {
  SendStartAndVerifyCallback();
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(
                  Eq("Invalid index in indexed header field representation")));
  decoder_state_.OnIndexedHeader(kFirstDynamicTableIndex);
}
// A literal header whose name index points past the (empty) dynamic table
// is an error.
TEST_F(HpackDecoderStateTest, InvalidNameIndex) {
  SendStartAndVerifyCallback();
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(Eq("Invalid index in literal header field "
                                       "with indexed name representation")));
  SetValue("value", UNBUFFERED);
  decoder_state_.OnNameIndexAndLiteralValue(
      HpackEntryType::kIndexedLiteralHeader, kFirstDynamicTableIndex,
      &value_buffer_);
}
// After the first error, all further decoder events are ignored: no more
// listener callbacks and no additional error reports.
TEST_F(HpackDecoderStateTest, ErrorsSuppressCallbacks) {
  SendStartAndVerifyCallback();
  EXPECT_CALL(listener_,
              OnHeaderErrorDetected(Eq("Name Huffman encoding error")));
  decoder_state_.OnHpackDecodeError(HpackDecodingError::kNameHuffmanError);
  // None of the following may produce callbacks (StrictMock enforces it).
  decoder_state_.OnIndexedHeader(1);
  decoder_state_.OnDynamicTableSizeUpdate(1);
  SetValue("value", UNBUFFERED);
  decoder_state_.OnNameIndexAndLiteralValue(
      HpackEntryType::kIndexedLiteralHeader, 4, &value_buffer_);
  SetName("name", UNBUFFERED);
  decoder_state_.OnLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
                                       &name_buffer_, &value_buffer_);
  decoder_state_.OnHeaderBlockEnd();
  decoder_state_.OnHpackDecodeError(HpackDecodingError::kIndexVarintError);
}
}
}
} | void HpackDecoderState::OnDynamicTableSizeUpdate(size_t size_limit) {
QUICHE_DVLOG(2) << "HpackDecoderState::OnDynamicTableSizeUpdate "
<< size_limit << ", required="
<< (require_dynamic_table_size_update_ ? "true" : "false")
<< ", allowed="
<< (allow_dynamic_table_size_update_ ? "true" : "false");
if (error_ != HpackDecodingError::kOk) {
return;
}
QUICHE_DCHECK_LE(lowest_header_table_size_, final_header_table_size_);
if (!allow_dynamic_table_size_update_) {
ReportError(HpackDecodingError::kDynamicTableSizeUpdateNotAllowed);
return;
}
if (require_dynamic_table_size_update_) {
if (size_limit > lowest_header_table_size_) {
ReportError(HpackDecodingError::
kInitialDynamicTableSizeUpdateIsAboveLowWaterMark);
return;
}
require_dynamic_table_size_update_ = false;
} else if (size_limit > final_header_table_size_) {
ReportError(
HpackDecodingError::kDynamicTableSizeUpdateIsAboveAcknowledgedSetting);
return;
}
decoder_tables_.DynamicTableSizeUpdate(size_limit);
if (saw_dynamic_table_size_update_) {
allow_dynamic_table_size_update_ = false;
} else {
saw_dynamic_table_size_update_ = true;
}
lowest_header_table_size_ = final_header_table_size_;
} | TEST_F(HpackDecoderStateTest, OptionalTableSizeChanges) {
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
SendSizeUpdate(1024);
EXPECT_EQ(1024u, header_table_size_limit());
SendSizeUpdate(0);
EXPECT_EQ(0u, header_table_size_limit());
EXPECT_CALL(listener_, OnHeaderErrorDetected(
Eq("Dynamic table size update not allowed")));
SendSizeUpdate(0);
}
TEST_F(HpackDecoderStateTest, RequiredTableSizeChangeBeforeHeader) {
EXPECT_EQ(4096u, decoder_state_.GetCurrentHeaderTableSizeSetting());
decoder_state_.ApplyHeaderTableSizeSetting(1024);
decoder_state_.ApplyHeaderTableSizeSetting(2048);
EXPECT_EQ(2048u, decoder_state_.GetCurrentHeaderTableSizeSetting());
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
SendSizeUpdate(1024);
EXPECT_EQ(1024u, header_table_size_limit());
SendSizeUpdate(1500);
EXPECT_EQ(1500u, header_table_size_limit());
SendEndAndVerifyCallback();
decoder_state_.ApplyHeaderTableSizeSetting(1024);
EXPECT_EQ(1024u, decoder_state_.GetCurrentHeaderTableSizeSetting());
SendStartAndVerifyCallback();
EXPECT_CALL(listener_,
OnHeaderErrorDetected(Eq("Missing dynamic table size update")));
decoder_state_.OnIndexedHeader(1);
decoder_state_.OnIndexedHeader(1);
decoder_state_.OnDynamicTableSizeUpdate(1);
SetValue("value", UNBUFFERED);
decoder_state_.OnNameIndexAndLiteralValue(
HpackEntryType::kIndexedLiteralHeader, 4, &value_buffer_);
SetName("name", UNBUFFERED);
decoder_state_.OnLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
&name_buffer_, &value_buffer_);
decoder_state_.OnHeaderBlockEnd();
decoder_state_.OnHpackDecodeError(HpackDecodingError::kIndexVarintError);
}
TEST_F(HpackDecoderStateTest, InvalidRequiredSizeUpdate) {
decoder_state_.ApplyHeaderTableSizeSetting(1024);
SendStartAndVerifyCallback();
EXPECT_EQ(Http2SettingsInfo::DefaultHeaderTableSize(),
header_table_size_limit());
EXPECT_CALL(
listener_,
OnHeaderErrorDetected(
Eq("Initial dynamic table size update is above low water mark")));
SendSizeUpdate(2048);
} |
#include <cmath>
#include <memory>
#include <unordered_map>
#include "tensorflow/c/checkpoint_reader.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
using str_util::Split;
using str_util::StringReplace;
using strings::StrCat;
namespace graph_transforms {
// Converts a dense [N, 1] float tensor into a sparse representation:
// `indices_tensor` receives the row positions of entries whose magnitude is
// at least 1e-5, and `values_tensor` receives the corresponding values.  If
// every entry is (near) zero, a single {0, 0.0} pair is emitted so that the
// output tensors are never empty.  Returns FailedPrecondition for any input
// whose shape is not [N, 1].
Status SparsifyWeights(const Tensor& tensor, Tensor* indices_tensor,
                       Tensor* values_tensor) {
  const bool is_column_vector =
      (tensor.dims() == 2) && (tensor.dim_size(1) == 1);
  if (!is_column_vector) {
    return tensorflow::errors::FailedPrecondition(
        "Transform only applicable to subgraph with 'Const' with "
        "tensor of shape [N, 1]. But instead get shape ",
        tensor.shape().DebugString(), ".");
  }
  const auto dense = tensor.flat<float>();
  std::vector<int64_t> kept_indices;
  std::vector<float> kept_values;
  for (int64_t row = 0; row < dense.size(); ++row) {
    const float entry = dense(row);
    if (std::abs(entry) >= 1.0e-5) {
      kept_indices.push_back(row);
      kept_values.push_back(entry);
    }
  }
  if (kept_indices.empty() || kept_values.empty()) {
    // Keep at least one (index, value) pair so downstream ops have input.
    kept_indices.push_back(0);
    kept_values.push_back(0);
  }
  *indices_tensor = Tensor(DataTypeToEnum<int64_t>::value,
                           {static_cast<int64_t>(kept_indices.size())});
  std::copy_n(kept_indices.begin(), kept_indices.size(),
              indices_tensor->flat<int64_t>().data());
  *values_tensor = Tensor(DataTypeToEnum<float>::value,
                          {static_cast<int64_t>(kept_values.size())});
  std::copy_n(kept_values.begin(), kept_values.size(),
              values_tensor->flat<float>().data());
  return OkStatus();
}
// Populates `node_def` as a "Const" node named `name` whose "value"
// attribute holds `tensor`.  The "dtype" attribute is left for callers to
// set separately.
void CreateConstNode(const Tensor& tensor, const string& name,
                     NodeDef* node_def) {
  node_def->set_name(name);
  node_def->set_op("Const");
  SetNodeTensorAttr<float>("value", tensor, node_def);
}
// Maps a partitioned-variable slice name (e.g. "w/part_1") back to the key
// under which the full tensor is stored in the checkpoint (e.g. "w").
// Names without a trailing "part_*" component are returned unchanged.
string GetMonolithicTensorKey(const string& tensor_slice_name) {
  std::vector<string> components = Split(tensor_slice_name, "/");
  if (absl::StartsWith(components.back(), "part_")) {
    // A bare "part_*" name with no enclosing scope would be malformed.
    CHECK_GE(components.size(), 2);
    components.pop_back();
  }
  return absl::StrJoin(components, "/");
}
// Looks up, for variable `target_name`, the "shape and slice" spec string
// (e.g. "4 1 0,4:0,1") that the graph's RestoreV2 op uses for it.  The spec
// is found by (1) locating the "save*/Assign*" node whose first input is the
// variable, (2) following its second input to the RestoreV2 node, and
// (3) reading the entry of the RestoreV2's shape_and_slices Const input at
// the offset where its tensor_names Const matches the variable's monolithic
// checkpoint key.  Returns Internal errors when any step cannot be resolved.
Status ObtainTensorSlice(const GraphDef& input_graph_def,
                         const string& target_name,
                         string* shape_slice_string) {
  string restore_node_name;
  for (const auto& node : input_graph_def.node()) {
    std::vector<string> node_name_parts = Split(node.name(), "/");
    if (node_name_parts.size() == 2 &&
        absl::StartsWith(node_name_parts[0], "save") &&
        absl::StartsWith(node_name_parts[1], "Assign") &&
        node.input(0) == target_name) {
      restore_node_name = node.input(1);
      break;
    }
  }
  if (restore_node_name.empty()) {
    // Previously an empty name fell through and scanned the whole graph for
    // a node named "" before producing a misleading error; fail fast with an
    // accurate message instead.
    return errors::Internal("Unable to find Assign node for variable: ",
                            target_name);
  }
  // The input may carry an output-index suffix, e.g. "save/RestoreV2:1".
  std::vector<string> restore_node_parts = Split(restore_node_name, ":");
  CHECK_LE(restore_node_parts.size(), 2);
  string tensor_names_node;
  string shape_and_slices_node;
  for (const auto& node : input_graph_def.node()) {
    if ((node.name() == restore_node_parts[0]) && (node.op() == "RestoreV2")) {
      tensor_names_node = node.input(1);
      shape_and_slices_node = node.input(2);
      break;
    }
  }
  // Find the offset of this variable's checkpoint key within the RestoreV2's
  // tensor_names Const input.
  int offset = -1;
  for (const auto& node : input_graph_def.node()) {
    if (node.name() == tensor_names_node) {
      Tensor tensor_names_tensor;
      TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor_names_tensor));
      const auto& tensor_names_value = tensor_names_tensor.flat<tstring>();
      for (int i = 0; i < tensor_names_value.size(); i++) {
        if (tensor_names_value(i) == GetMonolithicTensorKey(target_name)) {
          offset = i;
          break;
        }
      }
      break;  // Node names are unique; no need to keep scanning.
    }
  }
  if (offset == -1) {
    return errors::Internal("Unable to find RestoreV2 entry for variable: ",
                            target_name);
  }
  for (const auto& node : input_graph_def.node()) {
    if (node.name() == shape_and_slices_node) {
      Tensor shape_and_slices_tensor;
      TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &shape_and_slices_tensor));
      const auto& shape_and_slices_value =
          shape_and_slices_tensor.flat<tstring>();
      *shape_slice_string = shape_and_slices_value(offset);
      return OkStatus();
    }
  }
  return errors::Internal("Unable to find slice for variable: ", target_name);
}
// Reads the tensor backing `tensor_name` from an already-open checkpoint.
// When `shape_and_slice` is non-empty and describes a strict sub-slice of
// the full tensor, only that slice is read; otherwise the whole tensor is
// looked up under its monolithic key.  Fails if `ckpt_reader` is null.
Status ReadTensorFromCheckpoint(
    const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader,
    const string& shape_and_slice, Tensor* tensor) {
  if (!ckpt_reader) {
    return errors::Internal("Checkpoint reader was not initialized. ");
  }
  TensorShape parsed_full_shape;
  TensorSlice parsed_slice;
  TensorShape parsed_slice_shape;
  bool get_slice = false;
  if (!shape_and_slice.empty()) {
    TF_RETURN_IF_ERROR(
        checkpoint::ParseShapeAndSlice(shape_and_slice, &parsed_full_shape,
                                       &parsed_slice, &parsed_slice_shape));
    // Only take the slice path when the slice is a proper subset.
    get_slice = (parsed_full_shape != parsed_slice_shape);
  }
  const string monolithic_key = GetMonolithicTensorKey(tensor_name);
  if (get_slice) {
    TF_RETURN_IF_ERROR(
        ckpt_reader->LookupSlice(monolithic_key, parsed_slice, tensor));
  } else {
    TF_RETURN_IF_ERROR(ckpt_reader->Lookup(monolithic_key, tensor));
  }
  return OkStatus();
}
// Opens a BundleReader over the checkpoint named by the transform's
// "input_checkpoint" parameter, when that parameter is present.  Leaves
// `*ckpt_reader` untouched (typically null) otherwise, which downstream code
// uses to decide between Const-embedded weights and checkpoint reads.
Status InitializeCheckpointReader(const TransformFuncContext& context,
                                  std::unique_ptr<BundleReader>* ckpt_reader) {
  if (context.params.count("input_checkpoint")) {
    const string input_checkpoint = context.params.at("input_checkpoint")[0];
    // make_unique instead of reset(new ...): no naked `new`.
    *ckpt_reader =
        std::make_unique<BundleReader>(Env::Default(), input_checkpoint);
    TF_RETURN_IF_ERROR((*ckpt_reader)->status());
  }
  return OkStatus();
}
// Builds a map from each Variable/VariableV2 node name to its RestoreV2
// shape-and-slice string, as recovered from the graph by ObtainTensorSlice.
// Fails if any variable's slice spec cannot be resolved.
Status ObtainVariableInfo(
    const GraphDef& input_graph_def,
    std::unique_ptr<std::unordered_map<string, string> >* shapes_and_slices) {
  // make_unique instead of reset(new ...): no naked `new`.
  *shapes_and_slices = std::make_unique<std::unordered_map<string, string>>();
  for (const auto& node : input_graph_def.node()) {
    if ((node.op() == "Variable") || (node.op() == "VariableV2")) {
      string shape_slice;
      TF_RETURN_IF_ERROR(
          ObtainTensorSlice(input_graph_def, node.name(), &shape_slice));
      (**shapes_and_slices)[node.name()] = shape_slice;
    }
  }
  return OkStatus();
}
// Deletes input `index` from node `n`, preserving the relative order of the
// remaining inputs: bubble the victim to the end, then drop it.
Status RemoveInputAtIndex(NodeDef* n, int index) {
  const int last = n->input_size() - 1;
  for (int pos = index; pos < last; ++pos) {
    n->mutable_input()->SwapElements(pos, pos + 1);
  }
  n->mutable_input()->RemoveLast();
  return OkStatus();
}
// Deletes node `index` from graph `g`, preserving the relative order of the
// remaining nodes: bubble the victim to the end, then drop it.
Status RemoveNodeAtIndex(GraphDef* g, int index) {
  const int last = g->node_size() - 1;
  for (int pos = index; pos < last; ++pos) {
    g->mutable_node()->SwapElements(pos, pos + 1);
  }
  g->mutable_node()->RemoveLast();
  return OkStatus();
}
// Core rewrite loop: repeatedly matches `pattern` (a Gather or GatherV2 fed
// by Identity(Const|Variable|VariableV2)) in the graph and replaces each
// match with a HashTable + InitializeTable + LookupTableFind + ExpandDims
// subgraph whose table holds only the non-(near-)zero weight rows.  Keeps a
// reference count of node usages (`refs`) so that nodes orphaned by the
// rewrite are removed, and hooks every new InitializeTable into the shared
// init NoOp (`group_init_node`, overridable via the "group_init_node"
// transform parameter).  Loops until a pass finds no more matches.
Status SparsifyGatherInternal(
    const GraphDef& input_graph_def,
    const std::unique_ptr<std::unordered_map<string, string> >&
        shapes_and_slices,
    const TransformFuncContext& context, const OpTypePattern& pattern,
    const std::unique_ptr<BundleReader>& ckpt_reader,
    GraphDef* output_graph_def) {
  string group_init_node = "group_deps";
  if (context.params.count("group_init_node")) {
    group_init_node = context.params.at("group_init_node")[0];
  }
  GraphDef current_graph_def = input_graph_def;
  bool any_match_found = false;
  // Refcount of how many times each node is named as an input ("^" control
  // markers are stripped so control deps count too).
  std::unordered_map<string, int> refs;
  for (const auto& node : current_graph_def.node()) {
    for (const auto& input : node.input()) {
      auto parsed_input = StringReplace(input, "^", "", true);
      refs[parsed_input] += 1;
    }
  }
  do {
    any_match_found = false;
    GraphDef replaced_graph_def = current_graph_def;
    std::vector<string> init_table_node_names;
    std::vector<string> removed_node_names;
    TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
        current_graph_def, pattern,
        [&ckpt_reader, &any_match_found, &init_table_node_names,
         &shapes_and_slices, &removed_node_names,
         &refs](const NodeMatch& match, const std::set<string>& input_nodes,
                const std::set<string>& output_nodes,
                std::vector<NodeDef>* new_nodes) {
          any_match_found = true;
          const NodeDef& gather_node = match.node;
          // GatherV2 carries an explicit axis input; only axis 0 is
          // supported by this transform.
          if (gather_node.op() == "GatherV2") {
            const NodeDef& axis_node = match.inputs[2].node;
            Tensor axis_t;
            TF_RETURN_IF_ERROR(GetNodeAttr(axis_node, "value", &axis_t));
            int64_t axis = 0;
            if (axis_t.dtype() == DT_INT32) {
              axis = axis_t.scalar<int32>()();
            } else if (axis_t.dtype() == DT_INT64) {
              axis = axis_t.scalar<int64_t>()();
            } else {
              return tensorflow::errors::FailedPrecondition(
                  "Gather axis was not int32 or int64.");
            }
            if (axis != 0) {
              return tensorflow::errors::FailedPrecondition(
                  "Transform only applicable to subgraph with GatherV2 over "
                  "axis 0. Found axis ",
                  axis, ".");
            }
          }
          const NodeDef& weights_node = match.inputs[0].inputs[0].node;
          DataType data_type;
          TF_RETURN_IF_ERROR(GetNodeAttr(weights_node, "dtype", &data_type));
          if (data_type != DT_FLOAT) {
            return tensorflow::errors::FailedPrecondition(
                "Transform only applicable to subgraph with 'Const',"
                "'Variable', or 'VariableV2' of dtype "
                "'DT_FLOAT'. Found '" +
                    weights_node.op() + "' with name '",
                weights_node.name(), "' and dtype '", data_type, "'.");
          }
          // Weights come from the Const node itself, or (for variables)
          // from the checkpoint via the slice spec gathered earlier.
          Tensor weight;
          if (weights_node.op() == "Const") {
            weight = GetNodeTensorAttr(weights_node, "value");
          } else {
            TF_RETURN_IF_ERROR(ReadTensorFromCheckpoint(
                weights_node.name(), ckpt_reader,
                (*shapes_and_slices)[weights_node.name()], &weight));
          }
          // The weights node and its Identity reader are replaced entirely.
          removed_node_names.push_back(weights_node.name());
          removed_node_names.push_back(match.inputs[0].node.name());
          for (auto input_node : match.inputs[0].node.input()) {
            auto parsed_input = StringReplace(input_node, "^", "", true);
            refs[parsed_input]--;
          }
          Tensor indices_tensor;
          Tensor values_tensor;
          TF_RETURN_IF_ERROR(
              SparsifyWeights(weight, &indices_tensor, &values_tensor));
          // Build the sparse-lookup replacement subgraph.
          DataType key_dtype = DT_INT64;
          NodeDef indices_node;
          CreateConstNode(indices_tensor,
                          StrCat(weights_node.name(), "/indices"),
                          &indices_node);
          SetNodeAttr("dtype", key_dtype, &indices_node);
          NodeDef values_node;
          CreateConstNode(values_tensor, StrCat(weights_node.name(), "/values"),
                          &values_node);
          SetNodeAttr("dtype", data_type, &values_node);
          NodeDef hashtable_node;
          hashtable_node.set_op("HashTable");
          hashtable_node.set_name(StrCat(weights_node.name(), "/HashTable"));
          SetNodeAttr("key_dtype", key_dtype, &hashtable_node);
          SetNodeAttr("value_dtype", data_type, &hashtable_node);
          NodeDef init_table_node;
          init_table_node.set_op("InitializeTable");
          init_table_node.set_name(
              StrCat(weights_node.name(), "/InitializeTable"));
          SetNodeAttr("Tkey", key_dtype, &init_table_node);
          SetNodeAttr("Tval", data_type, &init_table_node);
          init_table_node_names.push_back(init_table_node.name());
          NodeDef lookup_node;
          lookup_node.set_op("LookupTableFind");
          lookup_node.set_name(StrCat(gather_node.name(), "/LookupTableFind"));
          SetNodeAttr("Tin", key_dtype, &lookup_node);
          SetNodeAttr("Tout", data_type, &lookup_node);
          // Missing keys (the pruned near-zero rows) look up as 0.0.
          Tensor zero_tensor(data_type, TensorShape({}));
          zero_tensor.flat<float>()(0) = 0.0;
          NodeDef default_value_node;
          CreateConstNode(zero_tensor, StrCat(gather_node.name(), "/Const"),
                          &default_value_node);
          SetNodeAttr("dtype", data_type, &default_value_node);
          // ExpandDims on axis -1 restores the original [N, 1] output shape.
          Tensor dim_idx(DT_INT32, TensorShape({}));
          dim_idx.flat<int32>()(0) = -1;
          NodeDef dim_idx_node;
          dim_idx_node.set_op("Const");
          dim_idx_node.set_name(
              StrCat(gather_node.name(), "/ExpandDims/Const"));
          SetNodeAttr("value", dim_idx, &dim_idx_node);
          SetNodeAttr("dtype", DT_INT32, &dim_idx_node);
          // The ExpandDims takes over the gather node's name so downstream
          // consumers are rewired automatically.
          NodeDef expand_dims_node;
          expand_dims_node.set_op("ExpandDims");
          expand_dims_node.set_name(gather_node.name());
          SetNodeAttr("T", data_type, &expand_dims_node);
          AddNodeInput(hashtable_node.name(), &init_table_node);
          refs[hashtable_node.name()]++;
          AddNodeInput(indices_node.name(), &init_table_node);
          refs[indices_node.name()]++;
          AddNodeInput(values_node.name(), &init_table_node);
          refs[values_node.name()]++;
          AddNodeInput(hashtable_node.name(), &lookup_node);
          refs[hashtable_node.name()]++;
          AddNodeInput(gather_node.input(1), &lookup_node);
          refs[gather_node.input(1)]++;
          AddNodeInput(default_value_node.name(), &lookup_node);
          refs[default_value_node.name()]++;
          AddNodeInput(lookup_node.name(), &expand_dims_node);
          refs[lookup_node.name()]++;
          AddNodeInput(dim_idx_node.name(), &expand_dims_node);
          refs[dim_idx_node.name()]++;
          new_nodes->push_back(match.inputs[1].node);
          new_nodes->push_back(indices_node);
          new_nodes->push_back(values_node);
          new_nodes->push_back(hashtable_node);
          new_nodes->push_back(init_table_node);
          new_nodes->push_back(lookup_node);
          new_nodes->push_back(default_value_node);
          new_nodes->push_back(dim_idx_node);
          new_nodes->push_back(expand_dims_node);
          return OkStatus();
        },
        {true}, &replaced_graph_def));
    // Find (or create) the shared init NoOp and attach every new
    // InitializeTable to it as a control dependency.
    NodeDef* init_op = nullptr;
    for (int i = 0; i < replaced_graph_def.node_size(); i++) {
      if (replaced_graph_def.node(i).name() == group_init_node &&
          replaced_graph_def.node(i).op() == "NoOp") {
        init_op = replaced_graph_def.mutable_node(i);
        break;
      }
    }
    if (!init_op) {
      init_op = replaced_graph_def.mutable_node()->Add();
      init_op->set_op("NoOp");
      init_op->set_name(group_init_node);
    }
    for (const string& name : init_table_node_names) {
      AddNodeInput(StrCat("^", name), init_op);
      refs[name]++;
    }
    // Declared graph inputs/outputs must never be garbage-collected.
    for (const auto& output : context.output_names) {
      refs.erase(output);
    }
    for (const auto& input : context.input_names) {
      refs.erase(input);
    }
    for (const auto& entry : refs) {
      if (entry.second == 0) {
        removed_node_names.push_back(entry.first);
      }
    }
    // Worklist-driven dead-node elimination: removing a node may orphan its
    // inputs, which are pushed back onto the worklist.
    while (!removed_node_names.empty()) {
      auto name = removed_node_names.back();
      removed_node_names.pop_back();
      int i = 0;
      while (i < replaced_graph_def.node_size()) {
        if ((replaced_graph_def.node(i).name() == name) &&
            (replaced_graph_def.node(i).op() != "RestoreV2")) {
          for (const auto& input : replaced_graph_def.node(i).input()) {
            auto parsed_input = StringReplace(input, "^", "", true);
            refs[parsed_input] -= 1;
            if (refs[parsed_input] == 0) {
              removed_node_names.push_back(parsed_input);
            }
          }
          TF_RETURN_IF_ERROR(RemoveNodeAtIndex(&replaced_graph_def, i));
          continue;
        }
        // Strip any inputs (data or control) that referenced the removed
        // node.
        int j = 0;
        bool deleted_inputs = false;
        while (j < replaced_graph_def.node(i).input_size()) {
          if (replaced_graph_def.node(i).input(j) == name ||
              replaced_graph_def.node(i).input(j) == ("^" + name)) {
            TF_RETURN_IF_ERROR(
                RemoveInputAtIndex(replaced_graph_def.mutable_node(i), j));
            deleted_inputs = true;
            continue;
          }
          j++;
        }
        if (deleted_inputs) {
          // ConcatV2 needs its "N" attr kept in sync; with only the axis
          // input remaining plus one value it degenerates to an Identity.
          if (replaced_graph_def.node(i).op() == "ConcatV2") {
            if (replaced_graph_def.node(i).input_size() > 2) {
              SetNodeAttr("N", replaced_graph_def.node(i).input_size() - 1,
                          replaced_graph_def.mutable_node(i));
            } else if (replaced_graph_def.node(i).input_size() == 2) {
              if (refs[replaced_graph_def.node(i).input(1)] != 1) {
                return errors::Internal(
                    "Expect axis tensor of ConcatV2 node to only be referenced "
                    "once.");
              }
              refs[replaced_graph_def.node(i).input(1)] -= 1;
              removed_node_names.push_back(replaced_graph_def.node(i).input(1));
              replaced_graph_def.mutable_node(i)->mutable_input()->RemoveLast();
              replaced_graph_def.mutable_node(i)->mutable_attr()->erase("N");
              replaced_graph_def.mutable_node(i)->set_op("Identity");
            } else {
              return errors::Internal(
                  "ConcatV2 should have at least two elements");
            }
          }
          // These ops are meaningless with a single remaining input and are
          // queued for removal themselves.
          if ((replaced_graph_def.node(i).op() == "Assign" ||
               replaced_graph_def.node(i).op() == "Reshape" ||
               replaced_graph_def.node(i).op() == "Equal" ||
               replaced_graph_def.node(i).op() == "Mean" ||
               replaced_graph_def.node(i).op() == "ScalarSummary") &&
              replaced_graph_def.node(i).input_size() == 1) {
            removed_node_names.push_back(replaced_graph_def.node(i).name());
          }
          if (!replaced_graph_def.node(i).input_size()) {
            removed_node_names.push_back(replaced_graph_def.node(i).name());
          }
        }
        i++;
      }
    }
    current_graph_def = replaced_graph_def;
  } while (any_match_found);
  *output_graph_def = current_graph_def;
  return OkStatus();
}
// Entry point for the "sparsify_gather" transform: rewrites Gather and
// GatherV2 lookups over (Const|Variable|VariableV2 -> Identity) embedding
// weights into HashTable-based sparse lookups.  Runs the internal rewrite
// once per gather flavor.
Status SparsifyGather(const GraphDef& input_graph_def,
                      const TransformFuncContext& context,
                      GraphDef* output_graph_def) {
  // Shared sub-pattern: embedding weights read through an Identity node.
  const OpTypePattern weights_read_pattern =
      {"Identity",
        {
          {"Const|Variable|VariableV2"}
        }
      };
  const OpTypePattern gather_pattern =
      {"Gather",
        {
          weights_read_pattern,
          {"*"},
        }
      };
  const OpTypePattern gather_v2_pattern =
      {"GatherV2",
        {
          weights_read_pattern,
          {"*"},
          {"Const"},  // axis
        }
      };
  GraphDef cleaned_input_graph_def;
  RemoveAttributes(input_graph_def, {"_output_shapes"},
                   &cleaned_input_graph_def);
  std::unique_ptr<BundleReader> ckpt_reader;
  TF_RETURN_IF_ERROR(InitializeCheckpointReader(context, &ckpt_reader));
  std::unique_ptr<std::unordered_map<string, string> > shapes_and_slices;
  TF_RETURN_IF_ERROR(
      ObtainVariableInfo(cleaned_input_graph_def, &shapes_and_slices));
  GraphDef temp_output;
  TF_RETURN_IF_ERROR(SparsifyGatherInternal(
      cleaned_input_graph_def, shapes_and_slices, context, gather_pattern,
      ckpt_reader, &temp_output));
  TF_RETURN_IF_ERROR(SparsifyGatherInternal(temp_output, shapes_and_slices,
                                            context, gather_v2_pattern,
                                            ckpt_reader, output_graph_def));
  return OkStatus();
}
// Exposes the transform to the Graph Transform Tool as "sparsify_gather".
REGISTER_GRAPH_TRANSFORM("sparsify_gather", SparsifyGather);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
// Forward declarations of the functions under test (defined in
// sparsify_gather.cc, which exposes no public header for them).
Status SparsifyGather(const GraphDef& input_graph_def,
                      const TransformFuncContext& context,
                      GraphDef* output_graph_def);
Status ReadTensorFromCheckpoint(
    const string& tensor_name, const std::unique_ptr<BundleReader>& ckpt_reader,
    const string& shape_and_slice, Tensor* tensor);
// Fixture for the "sparsify_gather" transform tests.  The helpers build
// small GraphDefs containing Gather/GatherV2 reads of embedding weights
// (either Const nodes or checkpoint-backed VariableV2 nodes), run
// SparsifyGather, and verify the structure of the rewritten graph.
class SparsifyGatherTest : public ::testing::Test {
 protected:
  // Adds a node named `name` with op `op` to `graph_def`.  `inputs` are
  // wired as data inputs, or as "^name" control dependencies when
  // `control_dep` is true.
  NodeDef* CreateNode(const StringPiece name, const StringPiece op,
                      const std::vector<NodeDef*>& inputs, GraphDef* graph_def,
                      bool control_dep = false) {
    NodeDef* node_def = graph_def->add_node();
    node_def->set_name(string(name));
    node_def->set_op(string(op));
    if (!control_dep) {
      std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) {
        node_def->add_input(input->name());
      });
    } else {
      std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) {
        node_def->add_input(strings::StrCat("^", input->name()));
      });
    }
    return node_def;
  }
  // Adds a Gather (or GatherV2 with an explicit axis-0 Const) node reading
  // `params` at `indices`.
  void MakeGather(StringPiece name, bool gather_v2, NodeDef* params,
                  NodeDef* indices, GraphDef* graph_def) {
    if (gather_v2) {
      NodeDef* axis_node =
          CreateNode(strings::StrCat(name, "_axis"), "Const", {}, graph_def);
      Tensor axis_t(DT_INT32, TensorShape({}));
      axis_t.scalar<int32>()() = 0;
      SetNodeTensorAttr<int32>("value", axis_t, axis_node);
      CreateNode(name, "GatherV2", {params, indices, axis_node}, graph_def);
    } else {
      CreateNode(name, "Gather", {params, indices}, graph_def);
    }
  }
  // Builds a graph with one partitioned weight ("w/part_1"), applies the
  // transform, and checks the rewritten structure.  `test_variable` selects
  // a checkpoint-backed VariableV2 (with save/restore nodes) instead of a
  // Const; `test_kept_concat` adds a third ConcatV2 input so the concat
  // survives (instead of degenerating to Identity).
  void TestSinglePartition(bool gather_v2, bool include_shared_init,
                           bool test_variable, bool test_kept_concat,
                           const string& shared_init_name = "group_deps") {
    GraphDef graph_def;
    const auto checkpoint_path =
        io::JoinPath(testing::TmpDir(), "checkpoint_single");
    NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def);
    NodeDef* w_node;
    NodeDef* zeros_const;
    NodeDef* zeros_shape;
    NodeDef* zeros_node;
    NodeDef* assign_node;
    // One entry (0.000001) is below the 1e-5 sparsification threshold.
    Tensor weights(DT_FLOAT, TensorShape({4, 1}));
    test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
    if (!test_variable) {
      w_node = CreateNode("w/part_1", "Const", {}, &graph_def);
      SetNodeTensorAttr<float>("value", weights, w_node);
    } else {
      w_node = CreateNode("w/part_1", "VariableV2", {}, &graph_def);
      zeros_shape = CreateNode("w/part_1/Initializer/zeros/shape_as_tensor",
                               "Const", {}, &graph_def);
      zeros_const = CreateNode("w/part_1/Initializer/zeros/Const", "Const", {},
                               &graph_def);
      zeros_node = CreateNode("w/part_1/Initializer/zeros", "Fill",
                              {zeros_shape, zeros_const}, &graph_def);
      assign_node = CreateNode("w/part_1/Assign", "Assign",
                               {w_node, zeros_node}, &graph_def);
      NodeDef* save_const_node =
          CreateNode("save/Const", "Const", {}, &graph_def);
      Tensor tensor_names_values(DT_STRING, TensorShape({1}));
      test::FillValues<tstring>(&tensor_names_values, {"w"});
      NodeDef* tensor_names_node =
          CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def);
      SetNodeTensorAttr<string>("value", tensor_names_values,
                                tensor_names_node);
      NodeDef* tensor_shapes_slices_node = CreateNode(
          "save/RestoreV2/shape_and_slices", "Const", {}, &graph_def);
      Tensor shapes_slices_val(DT_STRING, TensorShape({1}));
      shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1";
      SetNodeTensorAttr<string>("value", shapes_slices_val,
                                tensor_shapes_slices_node);
      NodeDef* restore_node = CreateNode(
          "save/RestoreV2", "RestoreV2",
          {save_const_node, tensor_names_node, tensor_shapes_slices_node},
          &graph_def);
      CreateNode("save/Assign", "Assign", {w_node, restore_node}, &graph_def);
      // Write the actual weight values into a real checkpoint on disk.
      BundleWriter writer(Env::Default(), checkpoint_path);
      TF_ASSERT_OK(writer.Add("w", weights));
      TF_ASSERT_OK(writer.Finish());
    }
    SetNodeAttr("dtype", DT_FLOAT, w_node);
    NodeDef* identity_node =
        CreateNode("w/read", "Identity", {w_node}, &graph_def);
    MakeGather("gather", gather_v2, identity_node, input_node, &graph_def);
    if (include_shared_init) {
      if (!test_variable) {
        CreateNode(shared_init_name, "NoOp", {}, &graph_def);
      } else {
        CreateNode(shared_init_name, "NoOp", {assign_node}, &graph_def, true);
      }
    }
    NodeDef* concat_axis_node =
        CreateNode("linear/concat/axis", "Const", {}, &graph_def);
    NodeDef* concat_input_node =
        CreateNode("concat/input/node", "Const", {}, &graph_def);
    NodeDef* concat_node = nullptr;
    if (!test_kept_concat) {
      concat_node = CreateNode(
          "concat/node", "ConcatV2",
          {identity_node, concat_input_node, concat_axis_node}, &graph_def);
      SetNodeAttr("N", 2, concat_node);
    } else {
      NodeDef* concat_input_node_2 =
          CreateNode("concat/input/node_2", "Const", {}, &graph_def);
      concat_node = CreateNode("concat/node", "ConcatV2",
                               {identity_node, concat_input_node,
                                concat_input_node_2, concat_axis_node},
                               &graph_def);
      SetNodeAttr("N", 3, concat_node);
    }
    GraphDef result;
    TransformFuncContext context;
    context.input_names = {"ids"};
    context.output_names = {"gather"};
    if (test_variable) {
      context.params["input_checkpoint"] = {checkpoint_path};
    }
    if (shared_init_name != "group_deps") {
      context.params["group_init_node"] = {shared_init_name};
    }
    TF_ASSERT_OK(SparsifyGather(graph_def, context, &result));
    std::map<string, const NodeDef*> node_lookup;
    MapNamesToNodes(result, &node_lookup);
    // Variable initializer and assign nodes must have been removed.
    EXPECT_EQ(0,
              node_lookup.count("w/part_1/Initializer/zeros/shape_as_tensor"));
    EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros/Const"));
    EXPECT_EQ(0, node_lookup.count("w/part_1/Initializer/zeros"));
    EXPECT_EQ(0, node_lookup.count("w/part_1/Assign"));
    EXPECT_EQ(1, node_lookup.count("ids"));
    EXPECT_EQ("Const", node_lookup.at("ids")->op());
    EXPECT_EQ(1, node_lookup.count("concat/node"));
    if (!test_kept_concat) {
      // With the gather input gone, the 2-input ConcatV2 degenerates to
      // Identity and its axis Const is removed.
      EXPECT_EQ(0, node_lookup.count("linear/concat/axis"));
      EXPECT_EQ("Identity", node_lookup.at("concat/node")->op());
      EXPECT_EQ(1, node_lookup.at("concat/node")->input_size());
      EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0));
    } else {
      // With two remaining value inputs the concat survives with N == 2.
      EXPECT_EQ(1, node_lookup.count("linear/concat/axis"));
      EXPECT_EQ("ConcatV2", node_lookup.at("concat/node")->op());
      EXPECT_EQ(3, node_lookup.at("concat/node")->input_size());
      EXPECT_EQ("concat/input/node", node_lookup.at("concat/node")->input(0));
      EXPECT_EQ("concat/input/node_2", node_lookup.at("concat/node")->input(1));
      EXPECT_EQ("linear/concat/axis", node_lookup.at("concat/node")->input(2));
      EXPECT_EQ(2, node_lookup.at("concat/node")->attr().at("N").i());
    }
    // Rows 0, 2, 3 survive sparsification; row 1 (0.000001) is pruned.
    EXPECT_EQ(1, node_lookup.count("w/part_1/indices"));
    EXPECT_EQ("Const", node_lookup.at("w/part_1/indices")->op());
    Tensor expected_indices_tensor(DT_INT64, TensorShape({3}));
    test::FillValues<int64_t>(&expected_indices_tensor, {0, 2, 3});
    test::ExpectTensorEqual<int64_t>(
        expected_indices_tensor,
        GetNodeTensorAttr(*(node_lookup.at("w/part_1/indices")), "value"));
    EXPECT_EQ(1, node_lookup.count("w/part_1/values"));
    EXPECT_EQ("Const", node_lookup.at("w/part_1/values")->op());
    Tensor expected_values_tensor(DT_FLOAT, TensorShape({3}));
    test::FillValues<float>(&expected_values_tensor, {0.2, 1.2, 0.001});
    test::ExpectTensorNear<float>(
        expected_values_tensor,
        GetNodeTensorAttr(*(node_lookup.at("w/part_1/values")), "value"), 1e-5);
    EXPECT_EQ(1, node_lookup.count("w/part_1/HashTable"));
    EXPECT_EQ("HashTable", node_lookup.at("w/part_1/HashTable")->op());
    EXPECT_EQ(1, node_lookup.count("w/part_1/InitializeTable"));
    EXPECT_EQ("InitializeTable",
              node_lookup.at("w/part_1/InitializeTable")->op());
    EXPECT_EQ(1, node_lookup.count("gather/LookupTableFind"));
    EXPECT_EQ("LookupTableFind",
              node_lookup.at("gather/LookupTableFind")->op());
    EXPECT_EQ(1, node_lookup.count("gather/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather/Const")->op());
    Tensor expected_gather_default_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&expected_gather_default_tensor, {0.0});
    test::ExpectTensorNear<float>(
        expected_gather_default_tensor,
        GetNodeTensorAttr(*(node_lookup.at("gather/Const")), "value"), 1e-5);
    EXPECT_EQ(1, node_lookup.count("gather/ExpandDims/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather/ExpandDims/Const")->op());
    Tensor expected_expand_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&expected_expand_dims_tensor, {-1});
    test::ExpectTensorEqual<int32>(
        expected_expand_dims_tensor,
        GetNodeTensorAttr(*(node_lookup.at("gather/ExpandDims/Const")),
                          "value"));
    // The gather node itself is replaced by an ExpandDims of the same name.
    EXPECT_EQ(1, node_lookup.count("gather"));
    EXPECT_EQ("ExpandDims", node_lookup.at("gather")->op());
    EXPECT_EQ(1, node_lookup.count(shared_init_name));
    EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op());
    // Verify the wiring of the replacement subgraph.
    EXPECT_EQ("w/part_1/HashTable",
              node_lookup.at("w/part_1/InitializeTable")->input(0));
    EXPECT_EQ("w/part_1/indices",
              node_lookup.at("w/part_1/InitializeTable")->input(1));
    EXPECT_EQ("w/part_1/values",
              node_lookup.at("w/part_1/InitializeTable")->input(2));
    EXPECT_EQ("w/part_1/HashTable",
              node_lookup.at("gather/LookupTableFind")->input(0));
    EXPECT_EQ("ids", node_lookup.at("gather/LookupTableFind")->input(1));
    EXPECT_EQ("gather/Const",
              node_lookup.at("gather/LookupTableFind")->input(2));
    EXPECT_EQ("gather/LookupTableFind", node_lookup.at("gather")->input(0));
    // The init NoOp must control-depend on the InitializeTable, and only it.
    EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
                        node_lookup.at(shared_init_name)->input().end(),
                        "^w/part_1/InitializeTable"),
              node_lookup.at(shared_init_name)->input().end());
    EXPECT_EQ(1, node_lookup.at(shared_init_name)->input().size());
  }
  // Like TestSinglePartition but with two weights ("w1", "w2") feeding two
  // gathers, so the transform must rewrite both matches and group both
  // InitializeTable nodes under the shared init NoOp.
  void TestMultiPartition(bool gather_v2, bool include_shared_init,
                          bool test_variable,
                          const string& shared_init_name = "group_deps") {
    GraphDef graph_def;
    const auto checkpoint_path =
        io::JoinPath(testing::TmpDir(), "checkpoint_multiple");
    NodeDef* input_node = CreateNode("ids", "Const", {}, &graph_def);
    NodeDef* w_node1;
    NodeDef* w_node2;
    NodeDef* zeros_const1;
    NodeDef* zeros_shape1;
    NodeDef* zeros_node1;
    NodeDef* zeros_const2;
    NodeDef* zeros_shape2;
    NodeDef* zeros_node2;
    NodeDef* assign_node1;
    NodeDef* assign_node2;
    Tensor weights(DT_FLOAT, TensorShape({4, 1}));
    test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
    if (!test_variable) {
      w_node1 = CreateNode("w1/part_1", "Const", {}, &graph_def);
      w_node2 = CreateNode("w2/part_1", "Const", {}, &graph_def);
      SetNodeTensorAttr<float>("value", weights, w_node1);
      SetNodeTensorAttr<float>("value", weights, w_node2);
    } else {
      // Both variables are restored from a single shared RestoreV2 node.
      NodeDef* save_const_node =
          CreateNode("save/Const", "Const", {}, &graph_def);
      NodeDef* tensor_names_node =
          CreateNode("save/RestoreV2/tensor_names", "Const", {}, &graph_def);
      Tensor tensor_names_values(DT_STRING, TensorShape({2}));
      test::FillValues<tstring>(&tensor_names_values, {"w1", "w2"});
      SetNodeTensorAttr<string>("value", tensor_names_values,
                                tensor_names_node);
      NodeDef* tensor_shapes_slices_node = CreateNode(
          "save/RestoreV2/shape_and_slices", "Const", {}, &graph_def);
      Tensor shapes_slices_val(DT_STRING, TensorShape({2}));
      shapes_slices_val.flat<tstring>()(0) = "4 1 0,4:0,1";
      shapes_slices_val.flat<tstring>()(1) = "4 1 0,4:0,1";
      SetNodeTensorAttr<string>("value", shapes_slices_val,
                                tensor_shapes_slices_node);
      NodeDef* restore_node = CreateNode(
          "save/RestoreV2", "RestoreV2",
          {save_const_node, tensor_names_node, tensor_shapes_slices_node},
          &graph_def);
      w_node1 = CreateNode("w1/part_1", "VariableV2", {}, &graph_def);
      zeros_shape1 = CreateNode("w1/part_1/Initializer/zeros/shape_as_tensor",
                                "Const", {}, &graph_def);
      zeros_const1 = CreateNode("w1/part_1/Initializer/zeros/Const", "Const",
                                {}, &graph_def);
      zeros_node1 = CreateNode("w1/part_1/Initializer/zeros", "Fill",
                               {zeros_shape1, zeros_const1}, &graph_def);
      assign_node1 = CreateNode("w1/part_1/Assign", "Assign",
                                {w_node1, zeros_node1}, &graph_def);
      CreateNode("save/Assign", "Assign", {w_node1, restore_node}, &graph_def);
      w_node2 = CreateNode("w2/part_1", "VariableV2", {}, &graph_def);
      zeros_shape2 = CreateNode("w2/part_1/Initializer/zeros/shape_as_tensor",
                                "Const", {}, &graph_def);
      zeros_const2 = CreateNode("w2/part_1/Initializer/zeros/Const", "Const",
                                {}, &graph_def);
      zeros_node2 = CreateNode("w2/part_1/Initializer/zeros", "Fill",
                               {zeros_shape2, zeros_const2}, &graph_def);
      assign_node2 = CreateNode("w2/part_1/Assign", "Assign",
                                {w_node2, zeros_node2}, &graph_def);
      CreateNode("save/Assign_1", "Assign", {w_node2, restore_node},
                 &graph_def);
      BundleWriter writer(Env::Default(), checkpoint_path);
      TF_ASSERT_OK(writer.Add("w1", weights));
      TF_ASSERT_OK(writer.Add("w2", weights));
      TF_ASSERT_OK(writer.Finish());
    }
    SetNodeAttr("dtype", DT_FLOAT, w_node1);
    SetNodeAttr("dtype", DT_FLOAT, w_node2);
    NodeDef* identity_node1 =
        CreateNode("w1/part_1/read", "Identity", {w_node1}, &graph_def);
    NodeDef* identity_node2 =
        CreateNode("w2/part_1/read", "Identity", {w_node2}, &graph_def);
    MakeGather("gather1", gather_v2, identity_node1, input_node, &graph_def);
    MakeGather("gather2", gather_v2, identity_node2, input_node, &graph_def);
    NodeDef* concat_axis_node =
        CreateNode("linear/concat/axis", "Const", {}, &graph_def);
    NodeDef* concat_node = CreateNode(
        "concat/node", "ConcatV2",
        {identity_node1, identity_node2, concat_axis_node}, &graph_def);
    SetNodeAttr("N", 2, concat_node);
    if (include_shared_init) {
      if (!test_variable) {
        CreateNode(shared_init_name, "NoOp", {}, &graph_def);
      } else {
        CreateNode(shared_init_name, "NoOp", {assign_node1, assign_node2},
                   &graph_def, true);
      }
    }
    GraphDef result;
    TransformFuncContext context;
    context.input_names = {"ids"};
    context.output_names = {"gather1", "gather2"};
    if (test_variable) {
      context.params["input_checkpoint"] = {checkpoint_path};
    }
    if (shared_init_name != "group_deps") {
      context.params["group_init_node"] = {shared_init_name};
    }
    TF_ASSERT_OK(SparsifyGather(graph_def, context, &result));
    std::map<string, const NodeDef*> node_lookup;
    MapNamesToNodes(result, &node_lookup);
    // Initializer/assign machinery for both variables must be gone.
    EXPECT_EQ(0,
              node_lookup.count("w1/part_1/Initializer/zeros/shape_as_tensor"));
    EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros/Const"));
    EXPECT_EQ(0, node_lookup.count("w1/part_1/Initializer/zeros"));
    EXPECT_EQ(0, node_lookup.count("w1/part_1/Assign"));
    EXPECT_EQ(0,
              node_lookup.count("w2/part_1/Initializer/zeros/shape_as_tensor"));
    EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros/Const"));
    EXPECT_EQ(0, node_lookup.count("w2/part_1/Initializer/zeros"));
    EXPECT_EQ(0, node_lookup.count("w2/part_1/Assign"));
    EXPECT_EQ(1, node_lookup.count("ids"));
    EXPECT_EQ("Const", node_lookup.at("ids")->op());
    EXPECT_EQ(1, node_lookup.count(shared_init_name));
    EXPECT_EQ("NoOp", node_lookup.at(shared_init_name)->op());
    EXPECT_EQ(1, node_lookup.count("w1/part_1/indices"));
    EXPECT_EQ("Const", node_lookup.at("w1/part_1/indices")->op());
    Tensor expected_indices_tensor1(DT_INT64, TensorShape({3}));
    test::FillValues<int64_t>(&expected_indices_tensor1, {0, 2, 3});
    test::ExpectTensorEqual<int64_t>(
        expected_indices_tensor1,
        GetNodeTensorAttr(*(node_lookup.at("w1/part_1/indices")), "value"));
    EXPECT_EQ(1, node_lookup.count("w1/part_1/values"));
    EXPECT_EQ("Const", node_lookup.at("w1/part_1/values")->op());
    Tensor expected_values_tensor1(DT_FLOAT, TensorShape({3}));
    test::FillValues<float>(&expected_values_tensor1, {0.2, 1.2, 0.001});
    test::ExpectTensorNear<float>(
        expected_values_tensor1,
        GetNodeTensorAttr(*(node_lookup.at("w1/part_1/values")), "value"),
        1e-5);
    EXPECT_EQ(1, node_lookup.count("w1/part_1/HashTable"));
    EXPECT_EQ("HashTable", node_lookup.at("w1/part_1/HashTable")->op());
    EXPECT_EQ(1, node_lookup.count("w1/part_1/InitializeTable"));
    EXPECT_EQ("InitializeTable",
              node_lookup.at("w1/part_1/InitializeTable")->op());
    EXPECT_EQ(1, node_lookup.count("gather1/LookupTableFind"));
    EXPECT_EQ("LookupTableFind",
              node_lookup.at("gather1/LookupTableFind")->op());
    EXPECT_EQ(1, node_lookup.count("gather1/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather1/Const")->op());
    Tensor expected_gather_default_tensor1(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&expected_gather_default_tensor1, {0.0});
    test::ExpectTensorNear<float>(
        expected_gather_default_tensor1,
        GetNodeTensorAttr(*(node_lookup.at("gather1/Const")), "value"), 1e-5);
    EXPECT_EQ(1, node_lookup.count("gather1/ExpandDims/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather1/ExpandDims/Const")->op());
    Tensor expected_expand_dims_tensor1(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&expected_expand_dims_tensor1, {-1});
    test::ExpectTensorEqual<int32>(
        expected_expand_dims_tensor1,
        GetNodeTensorAttr(*(node_lookup.at("gather1/ExpandDims/Const")),
                          "value"));
    EXPECT_EQ(1, node_lookup.count("gather1"));
    EXPECT_EQ("ExpandDims", node_lookup.at("gather1")->op());
    EXPECT_EQ(1, node_lookup.count("w2/part_1/indices"));
    EXPECT_EQ("Const", node_lookup.at("w2/part_1/indices")->op());
    Tensor expected_indices_tensor2(DT_INT64, TensorShape({3}));
    test::FillValues<int64_t>(&expected_indices_tensor2, {0, 2, 3});
    test::ExpectTensorEqual<int64_t>(
        expected_indices_tensor2,
        GetNodeTensorAttr(*(node_lookup.at("w2/part_1/indices")), "value"));
    EXPECT_EQ(1, node_lookup.count("w2/part_1/values"));
    EXPECT_EQ("Const", node_lookup.at("w2/part_1/values")->op());
    Tensor expected_values_tensor2(DT_FLOAT, TensorShape({3}));
    test::FillValues<float>(&expected_values_tensor2, {0.2, 1.2, 0.001});
    test::ExpectTensorNear<float>(
        expected_values_tensor2,
        GetNodeTensorAttr(*(node_lookup.at("w2/part_1/values")), "value"),
        1e-5);
    EXPECT_EQ(1, node_lookup.count("w2/part_1/HashTable"));
    EXPECT_EQ("HashTable", node_lookup.at("w2/part_1/HashTable")->op());
    EXPECT_EQ(1, node_lookup.count("w2/part_1/InitializeTable"));
    EXPECT_EQ("InitializeTable",
              node_lookup.at("w2/part_1/InitializeTable")->op());
    EXPECT_EQ(1, node_lookup.count("gather2/LookupTableFind"));
    EXPECT_EQ("LookupTableFind",
              node_lookup.at("gather2/LookupTableFind")->op());
    EXPECT_EQ(1, node_lookup.count("gather2/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather2/Const")->op());
    Tensor expected_gather_default_tensor2(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&expected_gather_default_tensor2, {0.0});
    test::ExpectTensorNear<float>(
        expected_gather_default_tensor2,
        GetNodeTensorAttr(*(node_lookup.at("gather2/Const")), "value"), 1e-5);
    EXPECT_EQ(1, node_lookup.count("gather2/ExpandDims/Const"));
    EXPECT_EQ("Const", node_lookup.at("gather2/ExpandDims/Const")->op());
    Tensor expected_expand_dims_tensor2(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&expected_expand_dims_tensor2, {-1});
    test::ExpectTensorEqual<int32>(
        expected_expand_dims_tensor2,
        GetNodeTensorAttr(*(node_lookup.at("gather2/ExpandDims/Const")),
                          "value"));
    EXPECT_EQ(1, node_lookup.count("gather2"));
    EXPECT_EQ("ExpandDims", node_lookup.at("gather2")->op());
    // Verify the wiring of both replacement subgraphs.
    EXPECT_EQ("w1/part_1/HashTable",
              node_lookup.at("w1/part_1/InitializeTable")->input(0));
    EXPECT_EQ("w1/part_1/indices",
              node_lookup.at("w1/part_1/InitializeTable")->input(1));
    EXPECT_EQ("w1/part_1/values",
              node_lookup.at("w1/part_1/InitializeTable")->input(2));
    EXPECT_EQ("w2/part_1/HashTable",
              node_lookup.at("w2/part_1/InitializeTable")->input(0));
    EXPECT_EQ("w2/part_1/indices",
              node_lookup.at("w2/part_1/InitializeTable")->input(1));
    EXPECT_EQ("w2/part_1/values",
              node_lookup.at("w2/part_1/InitializeTable")->input(2));
    EXPECT_EQ("w1/part_1/HashTable",
              node_lookup.at("gather1/LookupTableFind")->input(0));
    EXPECT_EQ("ids", node_lookup.at("gather1/LookupTableFind")->input(1));
    EXPECT_EQ("gather1/Const",
              node_lookup.at("gather1/LookupTableFind")->input(2));
    EXPECT_EQ("gather1/LookupTableFind", node_lookup.at("gather1")->input(0));
    EXPECT_EQ("w2/part_1/HashTable",
              node_lookup.at("gather2/LookupTableFind")->input(0));
    EXPECT_EQ("ids", node_lookup.at("gather2/LookupTableFind")->input(1));
    EXPECT_EQ("gather2/Const",
              node_lookup.at("gather2/LookupTableFind")->input(2));
    EXPECT_EQ("gather2/LookupTableFind", node_lookup.at("gather2")->input(0));
    // Both concat inputs were rewritten away, so the concat and its axis
    // must have been removed entirely.
    EXPECT_EQ(0, node_lookup.count("linear/concat/axis"));
    EXPECT_EQ(0, node_lookup.count("concat/node"));
    EXPECT_EQ(2, node_lookup.at(shared_init_name)->input_size());
    EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
                        node_lookup.at(shared_init_name)->input().end(),
                        "^w1/part_1/InitializeTable"),
              node_lookup.at(shared_init_name)->input().end());
    EXPECT_NE(std::find(node_lookup.at(shared_init_name)->input().begin(),
                        node_lookup.at(shared_init_name)->input().end(),
                        "^w2/part_1/InitializeTable"),
              node_lookup.at(shared_init_name)->input().end());
  }
  // Writes a sliced tensor to a checkpoint and verifies that
  // ReadTensorFromCheckpoint reads back exactly that slice when given a
  // shape-and-slice spec describing a proper subset.
  void TestReadTensorSlice() {
    const auto checkpoint_path =
        io::JoinPath(testing::TmpDir(), "checkpoint_slice");
    Tensor weights(DT_FLOAT, TensorShape({2, 1}));
    test::FillValues<float>(&weights, {0.2, 0.000001});
    BundleWriter writer(Env::Default(), checkpoint_path);
    TF_ASSERT_OK(writer.AddSlice("w", TensorShape({4, 1}),
                                 TensorSlice::ParseOrDie("0,2:0,1"), weights));
    TF_ASSERT_OK(writer.Finish());
    std::unique_ptr<BundleReader> reader(
        new BundleReader(Env::Default(), checkpoint_path));
    Tensor results;
    TF_ASSERT_OK(
        ReadTensorFromCheckpoint("w/part_0", reader, "4 1 0,2:0,1", &results));
    test::ExpectTensorEqual<float>(weights, results);
  }
};
TEST_F(SparsifyGatherTest, TestSinglePartition) {
  // Exhaustively exercise all combinations of the four boolean options,
  // then the "shared_inits" variants (second option fixed to true).  The
  // loop nesting reproduces the original call sequence exactly.
  for (const bool fourth_flag : {false, true}) {
    for (const bool third_flag : {false, true}) {
      for (const bool first_flag : {false, true}) {
        for (const bool second_flag : {false, true}) {
          TestSinglePartition(first_flag, second_flag, third_flag,
                              fourth_flag);
        }
      }
    }
    for (const bool third_flag : {false, true}) {
      for (const bool first_flag : {false, true}) {
        TestSinglePartition(first_flag, true, third_flag, fourth_flag,
                            "shared_inits");
      }
    }
  }
}
TEST_F(SparsifyGatherTest, TestMultiPartition) {
  // All combinations of the three boolean options, followed by the
  // "shared_inits" variants with the second option fixed to true.  The
  // loop nesting reproduces the original call sequence exactly.
  for (const bool third_flag : {false, true}) {
    for (const bool first_flag : {false, true}) {
      for (const bool second_flag : {false, true}) {
        TestMultiPartition(first_flag, second_flag, third_flag);
      }
    }
  }
  for (const bool third_flag : {false, true}) {
    for (const bool first_flag : {false, true}) {
      TestMultiPartition(first_flag, true, third_flag, "shared_inits");
    }
  }
}
TEST_F(SparsifyGatherTest, TestTensorSlice) {
  // Delegates to the fixture helper that round-trips a tensor slice
  // through a checkpoint bundle.
  TestReadTensorSlice();
}
}
} | Status RemoveInputAtIndex(NodeDef* n, int index) {
for (int i = index; i < n->input_size() - 1; i++) {
n->mutable_input()->SwapElements(i, i + 1);
}
n->mutable_input()->RemoveLast();
return OkStatus();
} | TEST_F(SparsifyGatherTest, TestSinglePartition) {
TestSinglePartition(false, false, false, false);
TestSinglePartition(false, true, false, false);
TestSinglePartition(true, false, false, false);
TestSinglePartition(true, true, false, false);
TestSinglePartition(false, false, true, false);
TestSinglePartition(false, true, true, false);
TestSinglePartition(true, false, true, false);
TestSinglePartition(true, true, true, false);
TestSinglePartition(false, true, false, false, "shared_inits");
TestSinglePartition(true, true, false, false, "shared_inits");
TestSinglePartition(false, true, true, false, "shared_inits");
TestSinglePartition(true, true, true, false, "shared_inits");
TestSinglePartition(false, false, false, true);
TestSinglePartition(false, true, false, true);
TestSinglePartition(true, false, false, true);
TestSinglePartition(true, true, false, true);
TestSinglePartition(false, false, true, true);
TestSinglePartition(false, true, true, true);
TestSinglePartition(true, false, true, true);
TestSinglePartition(true, true, true, true);
TestSinglePartition(false, true, false, true, "shared_inits");
TestSinglePartition(true, true, false, true, "shared_inits");
TestSinglePartition(false, true, true, true, "shared_inits");
TestSinglePartition(true, true, true, true, "shared_inits");
}
TEST_F(SparsifyGatherTest, TestMultiPartition) {
  // All combinations of the three boolean options, followed by the
  // "shared_inits" variants with the second option fixed to true.  The
  // loop nesting reproduces the original call sequence exactly.
  for (const bool third_flag : {false, true}) {
    for (const bool first_flag : {false, true}) {
      for (const bool second_flag : {false, true}) {
        TestMultiPartition(first_flag, second_flag, third_flag);
      }
    }
  }
  for (const bool third_flag : {false, true}) {
    for (const bool first_flag : {false, true}) {
      TestMultiPartition(first_flag, true, third_flag, "shared_inits");
    }
  }
}
#include "arolla/naming/policy.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "arolla/naming/protopath_id.h"
#include "arolla/naming/table.h"
#include "arolla/util/indestructible.h"
namespace arolla::naming {
// Abstract base for naming-policy implementations.  Each concrete policy
// renders TablePath/ColumnPath values into a string in its own format;
// the public `Policy` value type forwards all calls to one of these.
class PolicyImpl {
 public:
  explicit PolicyImpl(absl::string_view policy_name)
      : policy_name_(policy_name) {}
  virtual ~PolicyImpl() = default;
  // Renders a column path according to this policy.
  virtual std::string Format(const ColumnPath& path) const = 0;
  // Renders a table path according to this policy.
  virtual std::string Format(const TablePath& path) const = 0;
  // Identifier of this policy, as passed at construction.
  const std::string& Name() const { return policy_name_; }
 private:
  std::string policy_name_;
};
// `Policy` is a thin value wrapper: every call delegates to the shared
// PolicyImpl instance it references.
const std::string& Policy::Name() const { return policy_impl_->Name(); }
std::string Policy::Format(const ColumnPath& path) const {
  return policy_impl_->Format(path);
}
std::string Policy::Format(const TablePath& path) const {
  return policy_impl_->Format(path);
}
namespace {
// Policy that formats a path as its unmodified FullName().
class DefaultPolicyImpl : public PolicyImpl {
 public:
  DefaultPolicyImpl() : PolicyImpl(kDefaultPolicyName) {}
  std::string Format(const ColumnPath& column_path) const override {
    return std::string(column_path.FullName());
  }
  std::string Format(const TablePath& table_path) const override {
    return std::string(table_path.FullName());
  }
};
// Policy that joins path segments with "__"; extension field names are
// additionally lowercased with dots turned into underscores.
class DoubleUnderscorePolicyImpl : public PolicyImpl {
 public:
  DoubleUnderscorePolicyImpl() : PolicyImpl(kDoubleUnderscorePolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Format(table_path.PathSegments());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Format(column_path.PathSegments());
  }
 private:
  // Non-extension field names pass through unchanged; extension names are
  // stripped of the marker prefix and normalized.
  static std::string MangleExtensionFieldName(absl::string_view field_name) {
    if (!absl::ConsumePrefix(&field_name, kExtensionFieldPrefix)) {
      return std::string(field_name);
    }
    return absl::StrReplaceAll(absl::AsciiStrToLower(field_name),
                               {{".", "_"}});
  }
  std::string Format(const std::vector<PathSegment>& segments) const {
    auto append_segment = [](std::string* out, const PathSegment& segment) {
      const std::string flattened =
          absl::StrReplaceAll(segment.FieldName(), {{"/", "__"}});
      absl::StrAppend(out, MangleExtensionFieldName(flattened));
    };
    return absl::StrJoin(segments, "__", append_segment);
  }
};
// Policy that drops the leading separator and turns every remaining "/"
// into a single underscore.
class SingleUnderscorePolicyImpl : public PolicyImpl {
 public:
  SingleUnderscorePolicyImpl() : PolicyImpl(kSingleUnderscorePolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Reformat(table_path.FullName());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Reformat(column_path.FullName());
  }
 private:
  // Empty names stay empty; otherwise the first character (the leading
  // "/") is stripped before the separators are replaced.
  std::string Reformat(absl::string_view name) const {
    return name.empty()
               ? ""
               : absl::StrReplaceAll(name.substr(1), {{"/", "_"}});
  }
};
class LeafOnlyPolicyImpl : public PolicyImpl {
public:
LeafOnlyPolicyImpl() : PolicyImpl(kLeafOnlyPolicyName) {}
std::string Format(const TablePath& table_path) const override {
return Reformat(table_path.FullName());
}
std::string Format(const ColumnPath& column_path) const override {
return Reformat(column_path.FullName());
}
private:
std::string Reformat(absl::string_view name) const {
return std::string(absl::EndsWith(name, "@size")
? name
: name.substr(name.find_last_of('/') + 1));
}
};
// Policy that delegates formatting to the protopath-id converters from
// protopath_id.h.
class ProtopathIdPolicyImpl : public PolicyImpl {
 public:
  ProtopathIdPolicyImpl() : PolicyImpl(kProtopathIdPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return TablePathToProtopathId(table_path);
  }
  std::string Format(const ColumnPath& column_path) const override {
    return ColumnPathToProtopathId(column_path);
  }
};
// Policy that renders paths GoogleSQL-style: segments joined by "." with
// extension field names wrapped in parentheses.
class GoogleSQLPolicyImpl : public PolicyImpl {
 public:
  GoogleSQLPolicyImpl() : PolicyImpl(kGoogleSQLPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Format(table_path.PathSegments());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Format(column_path.PathSegments());
  }
 private:
  std::string Format(const std::vector<PathSegment>& segments) const {
    auto append_segment = [](std::string* out, const PathSegment& segment) {
      absl::string_view field_name = segment.FieldName();
      const bool is_extension =
          absl::ConsumePrefix(&field_name, kExtensionFieldPrefix);
      if (is_extension) {
        absl::StrAppend(out, "(", field_name, ")");
      } else {
        absl::StrAppend(out, field_name);
      }
    };
    return absl::StrJoin(segments, ".", append_segment);
  }
};
}
// Each factory below returns a Policy backed by a lazily-constructed,
// never-destroyed singleton (Indestructible avoids destruction-order
// problems at program shutdown).
Policy DefaultPolicy() {
  static const Indestructible<DefaultPolicyImpl> impl;
  return Policy{*impl};
}
Policy DoubleUnderscorePolicy() {
  static const Indestructible<DoubleUnderscorePolicyImpl> impl;
  return Policy{*impl};
}
Policy SingleUnderscorePolicy() {
  static const Indestructible<SingleUnderscorePolicyImpl> impl;
  return Policy{*impl};
}
Policy LeafOnlyPolicy() {
  static const Indestructible<LeafOnlyPolicyImpl> impl;
  return Policy{*impl};
}
Policy ProtopathIdPolicy() {
  static const Indestructible<ProtopathIdPolicyImpl> impl;
  return Policy{*impl};
}
Policy GoogleSQLPolicy() {
  static const Indestructible<GoogleSQLPolicyImpl> impl;
  return Policy{*impl};
}
// Resolves a policy by its registered name; unknown names produce an
// InvalidArgumentError.
absl::StatusOr<Policy> GetPolicy(absl::string_view policy_name) {
  struct NamedFactory {
    absl::string_view name;
    Policy (*make)();
  };
  const NamedFactory kFactories[] = {
      {kDefaultPolicyName, DefaultPolicy},
      {kDoubleUnderscorePolicyName, DoubleUnderscorePolicy},
      {kSingleUnderscorePolicyName, SingleUnderscorePolicy},
      {kLeafOnlyPolicyName, LeafOnlyPolicy},
      {kProtopathIdPolicyName, ProtopathIdPolicy},
      {kGoogleSQLPolicyName, GoogleSQLPolicy},
  };
  for (const auto& entry : kFactories) {
    if (policy_name == entry.name) {
      return entry.make();
    }
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("undefined naming policy: %s", policy_name));
}
} | #include "arolla/naming/policy.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/naming/table.h"
#include "arolla/util/testing/status_matchers_backport.h"
using ::arolla::testing::StatusIs;
namespace arolla::naming {
namespace {
// Factory-constructed policies must report their canonical names.
TEST(Policy, name) {
  EXPECT_EQ(DefaultPolicy().Name(), "default");
  EXPECT_EQ(DoubleUnderscorePolicy().Name(), "double_underscore");
}
// Exercises every policy's Format() over a representative hierarchy:
// root, a table, a repeated child, a column, a @size column, and a proto
// extension child.
TEST(Policy, format) {
  // The empty root formats as "" under every policy.
  TablePath root;
  EXPECT_EQ(DefaultPolicy().Format(root), "");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(root), "");
  EXPECT_EQ(SingleUnderscorePolicy().Format(root), "");
  EXPECT_EQ(LeafOnlyPolicy().Format(root), "");
  EXPECT_EQ(ProtopathIdPolicy().Format(root), "");
  EXPECT_EQ(GoogleSQLPolicy().Format(root), "");
  // A single top-level table.
  TablePath query("query");
  EXPECT_EQ(DefaultPolicy().Format(query), "/query");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(query), "query");
  EXPECT_EQ(SingleUnderscorePolicy().Format(query), "query");
  EXPECT_EQ(LeafOnlyPolicy().Format(query), "query");
  EXPECT_EQ(ProtopathIdPolicy().Format(query), "/query");
  EXPECT_EQ(GoogleSQLPolicy().Format(query), "query");
  // A repeated child table: protopath-id marks repetition with "[:]".
  TablePath doc = query.Child("docs", true);
  EXPECT_EQ(DefaultPolicy().Format(doc), "/query/docs");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(doc), "query__docs");
  EXPECT_EQ(SingleUnderscorePolicy().Format(doc), "query_docs");
  EXPECT_EQ(LeafOnlyPolicy().Format(doc), "docs");
  EXPECT_EQ(ProtopathIdPolicy().Format(doc), "/query/docs[:]");
  EXPECT_EQ(GoogleSQLPolicy().Format(doc), "query.docs");
  // A leaf column under the repeated table.
  ColumnPath quality = doc.Column("quality");
  EXPECT_EQ(DefaultPolicy().Format(quality), "/query/docs/quality");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(quality), "query__docs__quality");
  EXPECT_EQ(SingleUnderscorePolicy().Format(quality), "query_docs_quality");
  EXPECT_EQ(LeafOnlyPolicy().Format(quality), "quality");
  EXPECT_EQ(ProtopathIdPolicy().Format(quality), "/query/docs[:]/quality");
  EXPECT_EQ(GoogleSQLPolicy().Format(quality), "query.docs.quality");
  // "@size" pseudo-columns: note LeafOnlyPolicy keeps the full path here.
  ColumnPath terms_size = doc.Size("terms");
  EXPECT_EQ(DefaultPolicy().Format(terms_size), "/query/docs/terms/@size");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(terms_size),
            "query__docs__terms__@size");
  EXPECT_EQ(SingleUnderscorePolicy().Format(terms_size),
            "query_docs_terms_@size");
  EXPECT_EQ(LeafOnlyPolicy().Format(terms_size), "/query/docs/terms/@size");
  EXPECT_EQ(ProtopathIdPolicy().Format(terms_size),
            "/query/docs[:]/terms/@size");
  EXPECT_EQ(GoogleSQLPolicy().Format(terms_size), "query.docs.terms.@size");
  // Proto extension segment: each policy mangles the "Ext::" marker
  // differently (lowercase+underscores, parentheses, or verbatim).
  TablePath ext = doc.Child(ProtoExtensionAccess("foo_pkg.Bar.baz_ext"));
  EXPECT_EQ(DefaultPolicy().Format(ext),
            "/query/docs/Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(ext),
            "query__docs__foo_pkg_bar_baz_ext");
  EXPECT_EQ(SingleUnderscorePolicy().Format(ext),
            "query_docs_Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(LeafOnlyPolicy().Format(ext), "Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(ProtopathIdPolicy().Format(ext),
            "/query/docs[:]/Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(GoogleSQLPolicy().Format(ext), "query.docs.(foo_pkg.Bar.baz_ext)");
}
// GetPolicy resolves each registered name to the matching policy and
// rejects unknown names with InvalidArgument.
TEST(Policy, get_policy) {
  EXPECT_EQ(GetPolicy("default").value().Name(), "default");
  EXPECT_EQ(GetPolicy("double_underscore").value().Name(), "double_underscore");
  EXPECT_EQ(GetPolicy("single_underscore").value().Name(), "single_underscore");
  EXPECT_EQ(GetPolicy("leaf_only").value().Name(), "leaf_only");
  EXPECT_THAT(GetPolicy("unknown-policy-XYZ"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "undefined naming policy: unknown-policy-XYZ"));
}
}
} | Policy DefaultPolicy() {
static const Indestructible<DefaultPolicyImpl> impl;
return Policy{*impl};
} | namespace arolla::naming {
namespace {
TEST(Policy, name) {
EXPECT_EQ(DefaultPolicy().Name(), "default");
EXPECT_EQ(DoubleUnderscorePolicy().Name(), "double_underscore");
}
TEST(Policy, format) {
TablePath root;
EXPECT_EQ(DefaultPolicy().Format(root), "");
EXPECT_EQ(DoubleUnderscorePolicy().Format(root), "");
EXPECT_EQ(SingleUnderscorePolicy().Format(root), "");
EXPECT_EQ(LeafOnlyPolicy().Format(root), "");
EXPECT_EQ(ProtopathIdPolicy().Format(root), "");
EXPECT_EQ(GoogleSQLPolicy().Format(root), "");
TablePath query("query");
EXPECT_EQ(DefaultPolicy().Format(query), "/query");
EXPECT_EQ(DoubleUnderscorePolicy().Format(query), "query");
EXPECT_EQ(SingleUnderscorePolicy().Format(query), "query");
EXPECT_EQ(LeafOnlyPolicy().Format(query), "query");
EXPECT_EQ(ProtopathIdPolicy().Format(query), "/query");
EXPECT_EQ(GoogleSQLPolicy().Format(query), "query");
TablePath doc = query.Child("docs", true);
EXPECT_EQ(DefaultPolicy().Format(doc), "/query/docs");
EXPECT_EQ(DoubleUnderscorePolicy().Format(doc), "query__docs");
EXPECT_EQ(SingleUnderscorePolicy().Format(doc), "query_docs");
EXPECT_EQ(LeafOnlyPolicy().Format(doc), "docs");
EXPECT_EQ(ProtopathIdPolicy().Format(doc), "/query/docs[:]");
EXPECT_EQ(GoogleSQLPolicy().Format(doc), "query.docs");
ColumnPath quality = doc.Column("quality");
EXPECT_EQ(DefaultPolicy().Format(quality), "/query/docs/quality");
EXPECT_EQ(DoubleUnderscorePolicy().Format(quality), "query__docs__quality");
EXPECT_EQ(SingleUnderscorePolicy().Format(quality), "query_docs_quality");
EXPECT_EQ(LeafOnlyPolicy().Format(quality), "quality");
EXPECT_EQ(ProtopathIdPolicy().Format(quality), "/query/docs[:]/quality");
EXPECT_EQ(GoogleSQLPolicy().Format(quality), "query.docs.quality");
ColumnPath terms_size = doc.Size("terms");
EXPECT_EQ(DefaultPolicy().Format(terms_size), "/query/docs/terms/@size");
EXPECT_EQ(DoubleUnderscorePolicy().Format(terms_size),
"query__docs__terms__@size");
EXPECT_EQ(SingleUnderscorePolicy().Format(terms_size),
"query_docs_terms_@size");
EXPECT_EQ(LeafOnlyPolicy().Format(terms_size), "/query/docs/terms/@size");
EXPECT_EQ(ProtopathIdPolicy().Format(terms_size),
"/query/docs[:]/terms/@size");
EXPECT_EQ(GoogleSQLPolicy().Format(terms_size), "query.docs.terms.@size");
TablePath ext = doc.Child(ProtoExtensionAccess("foo_pkg.Bar.baz_ext"));
EXPECT_EQ(DefaultPolicy().Format(ext),
"/query/docs/Ext::foo_pkg.Bar.baz_ext");
EXPECT_EQ(DoubleUnderscorePolicy().Format(ext),
"query__docs__foo_pkg_bar_baz_ext");
EXPECT_EQ(SingleUnderscorePolicy().Format(ext),
"query_docs_Ext::foo_pkg.Bar.baz_ext");
EXPECT_EQ(LeafOnlyPolicy().Format(ext), "Ext::foo_pkg.Bar.baz_ext");
EXPECT_EQ(ProtopathIdPolicy().Format(ext),
"/query/docs[:]/Ext::foo_pkg.Bar.baz_ext");
EXPECT_EQ(GoogleSQLPolicy().Format(ext), "query.docs.(foo_pkg.Bar.baz_ext)");
}
TEST(Policy, get_policy) {
EXPECT_EQ(GetPolicy("default").value().Name(), "default");
EXPECT_EQ(GetPolicy("double_underscore").value().Name(), "double_underscore");
EXPECT_EQ(GetPolicy("single_underscore").value().Name(), "single_underscore");
EXPECT_EQ(GetPolicy("leaf_only").value().Name(), "leaf_only");
EXPECT_THAT(GetPolicy("unknown-policy-XYZ"),
StatusIs(absl::StatusCode::kInvalidArgument,
"undefined naming policy: unknown-policy-XYZ"));
} |
#include <cstdio>
#include "gtest/gtest.h"
// Embedded targets (ESP8266/ESP32, Arduino NRF52) get Arduino-style
// setup()/loop() entry points instead of main().
#if defined(GTEST_OS_ESP8266) || defined(GTEST_OS_ESP32) || \
    (defined(GTEST_OS_NRF52) && defined(ARDUINO))
#ifdef GTEST_OS_ESP8266
// ESP8266 only: the entry points must have C linkage.
extern "C" {
#endif
void setup() { testing::InitGoogleTest(); }
void loop() { RUN_ALL_TESTS(); }
#ifdef GTEST_OS_ESP8266
}
#endif
#elif defined(GTEST_OS_QURT)
// QuRT: no argc/argv available, so flags are not parsed.
GTEST_API_ int main() {
  printf("Running main() from %s\n", __FILE__);
  testing::InitGoogleTest();
  return RUN_ALL_TESTS();
}
#else
// Hosted platforms: parse Google Test flags, then run all tests.
GTEST_API_ int main(int argc, char **argv) {
  printf("Running main() from %s\n", __FILE__);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
#endif
namespace {
TEST(GTestMainTest, ShouldSucceed) {}
} | #include <cstdio>
#include "gtest/gtest.h"
#if defined(GTEST_OS_ESP8266) || defined(GTEST_OS_ESP32) || \
(defined(GTEST_OS_NRF52) && defined(ARDUINO))
#ifdef GTEST_OS_ESP8266
extern "C" {
#endif
void setup() { testing::InitGoogleTest(); } | #include "gtest/gtest.h"
namespace {
TEST(GTestMainTest, ShouldSucceed) {} |
#ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_MESSAGE_WRAPPER_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_MESSAGE_WRAPPER_H_
#include <cstdint>
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#include "absl/base/macros.h"
#include "absl/numeric/bits.h"
#include "base/internal/message_wrapper.h"
namespace cel::interop_internal {
struct MessageWrapperAccess;
}
namespace google::api::expr::runtime {
class LegacyTypeInfoApis;
// Tagged-pointer wrapper around a protobuf message plus the legacy type
// info needed to interpret it.  The low-order tag bits of message_ptr_
// record whether the pointee is a full google::protobuf::Message
// (kMessageTag set) or only a MessageLite (tag clear).  The countr_zero
// assertions check the pointer has at least kTagSize free low bits.
class MessageWrapper {
 public:
  // Mutable counterpart used while constructing a value: same tagged
  // pointer, but exposes a non-const message.  Finished via Build().
  class Builder {
   public:
    explicit Builder(google::protobuf::MessageLite* message)
        : message_ptr_(reinterpret_cast<uintptr_t>(message)) {
      ABSL_ASSERT(absl::countr_zero(reinterpret_cast<uintptr_t>(message)) >=
                  kTagSize);
    }
    explicit Builder(google::protobuf::Message* message)
        : message_ptr_(reinterpret_cast<uintptr_t>(message) | kMessageTag) {
      ABSL_ASSERT(absl::countr_zero(reinterpret_cast<uintptr_t>(message)) >=
                  kTagSize);
    }
    // Returns the message pointer with the tag bits masked off.
    google::protobuf::MessageLite* message_ptr() const {
      return reinterpret_cast<google::protobuf::MessageLite*>(message_ptr_ & kPtrMask);
    }
    // True when the wrapped message is a full google::protobuf::Message.
    bool HasFullProto() const {
      return (message_ptr_ & kTagMask) == kMessageTag;
    }
    // Produces the immutable wrapper, carrying the tag bits through.
    MessageWrapper Build(const LegacyTypeInfoApis* type_info) {
      return MessageWrapper(message_ptr_, type_info);
    }
   private:
    friend class MessageWrapper;
    explicit Builder(uintptr_t message_ptr) : message_ptr_(message_ptr) {}
    uintptr_t message_ptr_;
  };
  static_assert(alignof(google::protobuf::MessageLite) >= 2,
                "Assume that valid MessageLite ptrs have a free low-order bit");
  // Default instance: null message, null type info.
  MessageWrapper() : message_ptr_(0), legacy_type_info_(nullptr) {}
  MessageWrapper(const google::protobuf::MessageLite* message,
                 const LegacyTypeInfoApis* legacy_type_info)
      : message_ptr_(reinterpret_cast<uintptr_t>(message)),
        legacy_type_info_(legacy_type_info) {
    ABSL_ASSERT(absl::countr_zero(reinterpret_cast<uintptr_t>(message)) >=
                kTagSize);
  }
  // Full-Message overload: records kMessageTag in the pointer's low bits.
  MessageWrapper(const google::protobuf::Message* message,
                 const LegacyTypeInfoApis* legacy_type_info)
      : message_ptr_(reinterpret_cast<uintptr_t>(message) | kMessageTag),
        legacy_type_info_(legacy_type_info) {
    ABSL_ASSERT(absl::countr_zero(reinterpret_cast<uintptr_t>(message)) >=
                kTagSize);
  }
  // True when constructed from a full google::protobuf::Message.
  bool HasFullProto() const { return (message_ptr_ & kTagMask) == kMessageTag; }
  // Returns the message pointer with the tag bits masked off.
  const google::protobuf::MessageLite* message_ptr() const {
    return reinterpret_cast<const google::protobuf::MessageLite*>(message_ptr_ &
                                                                  kPtrMask);
  }
  const LegacyTypeInfoApis* legacy_type_info() const {
    return legacy_type_info_;
  }
 private:
  // Interop code (MessageWrapperAccess) reaches these members directly;
  // do not rename without updating that friend.
  friend struct ::cel::interop_internal::MessageWrapperAccess;
  MessageWrapper(uintptr_t message_ptr,
                 const LegacyTypeInfoApis* legacy_type_info)
      : message_ptr_(message_ptr), legacy_type_info_(legacy_type_info) {}
  Builder ToBuilder() { return Builder(message_ptr_); }
  static constexpr int kTagSize = ::cel::base_internal::kMessageWrapperTagSize;
  static constexpr uintptr_t kTagMask =
      ::cel::base_internal::kMessageWrapperTagMask;
  static constexpr uintptr_t kPtrMask =
      ::cel::base_internal::kMessageWrapperPtrMask;
  static constexpr uintptr_t kMessageTag =
      ::cel::base_internal::kMessageWrapperTagMessageValue;
  uintptr_t message_ptr_;
  const LegacyTypeInfoApis* legacy_type_info_;
};
static_assert(sizeof(MessageWrapper) <= 2 * sizeof(uintptr_t),
"MessageWrapper must not increase CelValue size.");
}
#endif | #include "eval/public/message_wrapper.h"
#include <type_traits>
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#include "eval/public/structs/trivial_legacy_type_info.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/casts.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
TEST(MessageWrapper, Size) {
static_assert(sizeof(MessageWrapper) <= 2 * sizeof(uintptr_t),
"MessageWrapper must not increase CelValue size.");
}
TEST(MessageWrapper, WrapsMessage) {
TestMessage test_message;
test_message.set_int64_value(20);
test_message.set_double_value(12.3);
MessageWrapper wrapped_message(&test_message, TrivialTypeInfo::GetInstance());
constexpr bool is_full_proto_runtime =
std::is_base_of_v<google::protobuf::Message, TestMessage>;
EXPECT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
ASSERT_EQ(wrapped_message.HasFullProto(), is_full_proto_runtime);
}
TEST(MessageWrapperBuilder, Builder) {
TestMessage test_message;
MessageWrapper::Builder builder(&test_message);
constexpr bool is_full_proto_runtime =
std::is_base_of_v<google::protobuf::Message, TestMessage>;
ASSERT_EQ(builder.HasFullProto(), is_full_proto_runtime);
ASSERT_EQ(builder.message_ptr(),
static_cast<google::protobuf::MessageLite*>(&test_message));
auto mutable_message =
cel::internal::down_cast<TestMessage*>(builder.message_ptr());
mutable_message->set_int64_value(20);
mutable_message->set_double_value(12.3);
MessageWrapper wrapped_message =
builder.Build(TrivialTypeInfo::GetInstance());
ASSERT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
ASSERT_EQ(wrapped_message.HasFullProto(), is_full_proto_runtime);
EXPECT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
EXPECT_EQ(test_message.int64_value(), 20);
EXPECT_EQ(test_message.double_value(), 12.3);
}
TEST(MessageWrapper, DefaultNull) {
MessageWrapper wrapper;
EXPECT_EQ(wrapper.message_ptr(), nullptr);
EXPECT_EQ(wrapper.legacy_type_info(), nullptr);
}
}
} | MessageWrapper(const google::protobuf::Message* message,
const LegacyTypeInfoApis* legacy_type_info)
: message_ptr_(reinterpret_cast<uintptr_t>(message) | kMessageTag),
legacy_type_info_(legacy_type_info) {
ABSL_ASSERT(absl::countr_zero(reinterpret_cast<uintptr_t>(message)) >=
kTagSize);
} | TEST(MessageWrapperBuilder, Builder) {
TestMessage test_message;
MessageWrapper::Builder builder(&test_message);
constexpr bool is_full_proto_runtime =
std::is_base_of_v<google::protobuf::Message, TestMessage>;
ASSERT_EQ(builder.HasFullProto(), is_full_proto_runtime);
ASSERT_EQ(builder.message_ptr(),
static_cast<google::protobuf::MessageLite*>(&test_message));
auto mutable_message =
cel::internal::down_cast<TestMessage*>(builder.message_ptr());
mutable_message->set_int64_value(20);
mutable_message->set_double_value(12.3);
MessageWrapper wrapped_message =
builder.Build(TrivialTypeInfo::GetInstance());
ASSERT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
ASSERT_EQ(wrapped_message.HasFullProto(), is_full_proto_runtime);
EXPECT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
EXPECT_EQ(test_message.int64_value(), 20);
EXPECT_EQ(test_message.double_value(), 12.3);
}
TEST(MessageWrapper, WrapsMessage) {
TestMessage test_message;
test_message.set_int64_value(20);
test_message.set_double_value(12.3);
MessageWrapper wrapped_message(&test_message, TrivialTypeInfo::GetInstance());
constexpr bool is_full_proto_runtime =
std::is_base_of_v<google::protobuf::Message, TestMessage>;
EXPECT_EQ(wrapped_message.message_ptr(),
static_cast<const google::protobuf::MessageLite*>(&test_message));
ASSERT_EQ(wrapped_message.HasFullProto(), is_full_proto_runtime);
} |
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using RefCountHandle = ::tsl::core::RefCountPtr<tensorflow::TensorHandle>;
// Number of payload bytes in `tensor` for a fixed-size element type.
size_t GetSizeInBytes(const tensorflow::Tensor& tensor) {
  const size_t element_count = tensor.shape().num_elements();
  return element_count * DataTypeSize(tensor.dtype());
}
// Builds a tensor of the requested dtype/shape by copying raw bytes from
// `src`, which must hold at least GetSizeInBytes(result) bytes.
tensorflow::Tensor GetTensor(const DtypeAndShape& dtype_and_shape, void* src) {
  DCHECK(DataTypeCanUseMemcpy(dtype_and_shape.dtype));
  tensorflow::Tensor result(dtype_and_shape.dtype, dtype_and_shape.shape);
  std::memcpy(result.data(), src, GetSizeInBytes(result));
  return result;
}
// Copies the tensor's raw payload into `dst`; the caller guarantees room
// for GetSizeInBytes(tensor) bytes.
void CopyToBuffer(void* dst, const tensorflow::Tensor& tensor) {
  DCHECK(DataTypeCanUseMemcpy(tensor.dtype()));
  const size_t byte_count = GetSizeInBytes(tensor);
  std::memcpy(dst, tensor.data(), byte_count);
}
}
// Runs the host callback: wraps each raw input buffer in a tensor using
// the recorded operand dtype/shape, executes the entry function eagerly,
// and copies each result tensor into the caller-provided output buffers.
absl::Status TfHostCallback::Call(void** inputs, void** outputs) {
  tsl::profiler::TraceMe trace_me("TfHostCallback::Call");
  tensorflow::ImmediateOpPtr op(ctx_->CreateOperation());
  TF_RETURN_IF_ERROR(
      op->Reset(entry_function_name_.c_str(), nullptr));
  // Scope a step so per-step resources are released when we return,
  // including on error paths.
  ctx_->StartStep();
  absl::Cleanup cleanup_step = [this]() { ctx_->EndStep(); };
  // Feed one input per recorded operand; the RefCountHandle releases each
  // tensor handle when it goes out of scope.
  for (int i = 0; i < operand_type_and_shapes_.size(); ++i) {
    tensorflow::Tensor t = GetTensor(operand_type_and_shapes_[i], inputs[i]);
    RefCountHandle handle(tensorflow::down_cast<tensorflow::TensorHandle*>(
        ctx_->CreateLocalHandleFromTFTensor(t, nullptr)));
    TF_RETURN_IF_ERROR(op->AddInput(handle.get()));
  }
  // Execute may update num_outputs to the actual count produced.
  int num_outputs = result_type_and_shapes_.size();
  absl::FixedArray<tensorflow::AbstractTensorHandle*> output_raw_handles(
      num_outputs);
  TF_RETURN_IF_ERROR(
      op->Execute(absl::MakeSpan(output_raw_handles), &num_outputs));
  // Take ownership of every raw output handle before any early return so
  // none of them leak.
  std::vector<RefCountHandle> output_handles;
  output_handles.reserve(num_outputs);
  for (auto* output_raw_handle : output_raw_handles) {
    output_handles.emplace_back(
        tensorflow::down_cast<tensorflow::TensorHandle*>(output_raw_handle));
  }
  if (result_type_and_shapes_.size() != num_outputs) {
    return absl::InternalError(absl::StrCat(
        "TF host callback invocation expected ", result_type_and_shapes_.size(),
        " results, instead got ", num_outputs));
  }
  // Copy each result's payload into the caller's buffers.
  for (int i = 0; i < num_outputs; ++i) {
    const tensorflow::Tensor* tensor;
    TF_RETURN_IF_ERROR(output_handles[i]->Tensor(&tensor));
    CopyToBuffer(outputs[i], *tensor);
  }
  return absl::OkStatus();
}
// Builds a TfHostCallback that runs `entry_function_name` -- registered
// from `functions` -- inside a private EagerContext backed by
// `device_mgr`, restricted to CPU devices.
absl::StatusOr<std::unique_ptr<TfHostCallback>> TfHostCallback::Create(
    absl::Span<const tensorflow::FunctionDef> functions,
    absl::string_view entry_function_name,
    absl::Span<const DtypeAndShape> operand_type_and_shapes,
    absl::Span<const DtypeAndShape> result_type_and_shapes,
    tensorflow::DeviceMgr* device_mgr) {
  tensorflow::SessionOptions options;
  // Restrict the callback to CPU devices.
  options.config.add_device_filters("/device:CPU:*");
  DCHECK(device_mgr != nullptr);
  // NOTE(review): the bare bool/nullptr arguments are positional
  // EagerContext constructor parameters; confirm their meanings against
  // the EagerContext signature before changing any of them.
  tensorflow::EagerContextPtr ctx(new tensorflow::EagerContext(
      options,
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      false, device_mgr,
      false,
      nullptr,
      nullptr,
      nullptr,
      true));
  // Register every provided function, including the entry function's
  // callees, so eager execution can resolve them by name.
  for (const tensorflow::FunctionDef& function : functions) {
    TF_RETURN_IF_ERROR(ctx->AddFunctionDef(function));
  }
  return absl::WrapUnique(
      new TfHostCallback(entry_function_name, operand_type_and_shapes,
                         result_type_and_shapes, std::move(ctx)));
}
// Creates a StaticDeviceMgr populated with the host's local CPU devices.
absl::StatusOr<std::unique_ptr<tensorflow::StaticDeviceMgr>>
CreateTfStaticDeviceMgr() {
  std::vector<std::unique_ptr<tensorflow::Device>> cpu_devices;
  TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddCpuDevices(
      tensorflow::SessionOptions(), "/job:localhost/replica:0/task:0",
      &cpu_devices));
  return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(cpu_devices));
}
}
} | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
// Converts the graph accumulated in `scope` into a FunctionDef named
// `function_name`.
absl::StatusOr<tensorflow::FunctionDef> ToFunctionDef(
    tensorflow::Scope scope, const std::string& function_name) {
  auto graph =
      std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
  TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
  tensorflow::FunctionDef function_def;
  TF_RETURN_IF_ERROR(
      tensorflow::GraphToFunctionDef(*graph, function_name, &function_def));
  return function_def;
}
// Builds a FunctionDef computing f(x: float) = x + 1.
absl::StatusOr<tensorflow::FunctionDef> MakeAddOneFunctionDef(
    const std::string& function_name) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  {
    // arg0 -> add0(arg0, const0=1.0) -> retval0
    auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
                                      tensorflow::DT_FLOAT, 0);
    auto const0_value = tensorflow::test::AsScalar<float>(1);
    auto const0 =
        tensorflow::ops::Const(scope.WithOpName("const0"),
                               tensorflow::Input::Initializer(const0_value));
    auto add0 = tensorflow::ops::Add(scope.WithOpName("add0"), arg0, const0);
    auto retval0 =
        tensorflow::ops::_Retval(scope.WithOpName("retval0"), add0, 0);
  }
  return ToFunctionDef(std::move(scope), function_name);
}
// Builds two FunctionDefs: an "add" function (x + 1) and a `function_name`
// function that invokes "add" through a StatefulPartitionedCall op.
absl::StatusOr<std::vector<tensorflow::FunctionDef>>
MakeAddOneWithCallFunctionDef(const std::string& function_name) {
  std::vector<tensorflow::FunctionDef> function_defs;
  TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
                      MakeAddOneFunctionDef("add"));
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  {
    auto input = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
                                       tensorflow::DT_FLOAT, 0);
    // The callee is referenced by name; it must match the FunctionDef above.
    tensorflow::NameAttrList callee;
    callee.set_name("add");
    auto call_op = tensorflow::ops::StatefulPartitionedCall(
        scope.WithOpName("call"), {input.output}, {tensorflow::DT_FLOAT},
        callee);
    auto output = tensorflow::ops::_Retval(scope.WithOpName("retval0"),
                                           call_op.output[0], 0);
  }
  TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
                      ToFunctionDef(std::move(scope), function_name));
  return function_defs;
}
// Builds a FunctionDef named `function_name` that stores its int32 argument
// into a resource variable with shared name "var". Produces no outputs.
absl::StatusOr<tensorflow::FunctionDef> MakeAssignVarFunctionDef(
    const std::string& function_name) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  {
    auto input = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
                                       tensorflow::DT_INT32, 0);
    // SharedName("var") lets other functions open the same variable.
    auto var_handle = tensorflow::ops::VarHandleOp(
        scope.WithOpName("var"), tensorflow::DT_INT32,
        tensorflow::TensorShape(),
        tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
    tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"),
                                                var_handle, input);
  }
  return ToFunctionDef(std::move(scope), function_name);
}
// Builds a FunctionDef named `function_name` that reads the shared resource
// variable "var", adds its int32 argument, writes the sum back, and returns
// the sum.
absl::StatusOr<tensorflow::FunctionDef> MakeAddVarFunctionDef(
    const std::string& function_name) {
  tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
  {
    auto input = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
                                       tensorflow::DT_INT32, 0);
    // Same shared name as MakeAssignVarFunctionDef, so both functions
    // operate on one underlying variable.
    auto var_handle = tensorflow::ops::VarHandleOp(
        scope.WithOpName("var"), tensorflow::DT_INT32,
        tensorflow::TensorShape(),
        tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
    auto current = tensorflow::ops::ReadVariableOp(scope.WithOpName("read"),
                                                   var_handle,
                                                   tensorflow::DT_INT32);
    auto sum = tensorflow::ops::Add(scope.WithOpName("add"), current, input);
    tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"),
                                                var_handle, sum);
    auto output =
        tensorflow::ops::_Retval(scope.WithOpName("retval0"), sum, 0);
  }
  return ToFunctionDef(std::move(scope), function_name);
}
// Runs the "add one" host callback on a single float and checks the result
// is written into the caller-provided output buffer.
TEST(TfHostCallbackTest, Simple) {
  ASSERT_OK_AND_ASSIGN(auto function_defs,
                       MakeAddOneWithCallFunctionDef("main"));
  // Input/output tensors are exchanged as raw buffers (void*), so the
  // callback needs the dtype/shape metadata alongside each pointer.
  auto in = AsTensor<float>({2.5f}, tensorflow::TensorShape({1}));
  void* in_ptrs[1] = {in.data()};
  std::vector<DtypeAndShape> in_dtype_shapes;
  in_dtype_shapes.push_back({.dtype = in.dtype(), .shape = in.shape()});
  auto out = AsTensor<float>({0.0f}, tensorflow::TensorShape({1}));
  void* out_ptrs[1] = {out.data()};
  std::vector<DtypeAndShape> out_dtype_shapes;
  out_dtype_shapes.push_back({.dtype = out.dtype(), .shape = out.shape()});
  ASSERT_OK_AND_ASSIGN(auto device_mgr, CreateTfStaticDeviceMgr());
  ASSERT_OK_AND_ASSIGN(auto tf_host_callback,
                       tensorflow::ifrt_serving::TfHostCallback::Create(
                           function_defs, "main", in_dtype_shapes,
                           out_dtype_shapes, device_mgr.get()));
  ASSERT_OK(tf_host_callback->Call(in_ptrs, out_ptrs));
  // 2.5 + 1.0 == 3.5, written in place through out_ptrs.
  EXPECT_THAT(out,
              TensorEq(AsTensor<float>({3.5f}, tensorflow::TensorShape({1}))));
}
// Two callbacks sharing one device manager must see the same resource
// variable: one callback assigns it, the other increments and returns it.
TEST(TfHostCallbackTest, SharedState) {
  tensorflow::ConfigProto session_config;
  ASSERT_OK_AND_ASSIGN(auto state, CreateTfStaticDeviceMgr());
  std::unique_ptr<TfHostCallback> assign_callback;
  {
    ASSERT_OK_AND_ASSIGN(auto functions, MakeAssignVarFunctionDef("main"));
    std::vector<DtypeAndShape> in_dtype_shapes;
    in_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    std::vector<DtypeAndShape> out_dtype_shapes;
    ASSERT_OK_AND_ASSIGN(
        assign_callback,
        TfHostCallback::Create({functions}, "main", in_dtype_shapes,
                               out_dtype_shapes, state.get()));
  }
  std::unique_ptr<TfHostCallback> incr_callback;
  {
    ASSERT_OK_AND_ASSIGN(auto functions, MakeAddVarFunctionDef("main"));
    std::vector<DtypeAndShape> in_dtype_shapes;
    in_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    std::vector<DtypeAndShape> out_dtype_shapes;
    out_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    ASSERT_OK_AND_ASSIGN(
        incr_callback,
        TfHostCallback::Create({functions}, "main", in_dtype_shapes,
                               out_dtype_shapes, state.get()));
  }
  constexpr int32_t kInit = 2;
  {
    auto in = AsTensor<int32_t>({kInit}, tensorflow::TensorShape({1}));
    void* in_ptrs[1] = {in.data()};
    // The assign callback has no outputs. The original code declared
    // `void* out_ptrs[0]`, a zero-length array that is ill-formed ISO C++
    // (GNU extension); pass a null pointer instead.
    void** out_ptrs = nullptr;
    ASSERT_OK(assign_callback->Call(in_ptrs, out_ptrs));
  }
  for (int i = 0; i < 3; ++i) {
    auto in = AsTensor<int32_t>({1}, tensorflow::TensorShape({1}));
    void* in_ptrs[1] = {in.data()};
    auto out = AsTensor<int32_t>({0}, tensorflow::TensorShape({1}));
    void* out_ptrs[1] = {out.data()};
    ASSERT_OK(incr_callback->Call(in_ptrs, out_ptrs));
    // Each iteration adds 1 to the shared variable initialized to kInit.
    EXPECT_THAT(out, TensorEq(AsTensor<int32_t>({kInit + i + 1},
                                                tensorflow::TensorShape({1}))));
  }
}
}
}
} | absl::Status TfHostCallback::Call(void** inputs, void** outputs) {
tsl::profiler::TraceMe trace_me("TfHostCallback::Call");
tensorflow::ImmediateOpPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(
op->Reset(entry_function_name_.c_str(), nullptr));
ctx_->StartStep();
absl::Cleanup cleanup_step = [this]() { ctx_->EndStep(); };
for (int i = 0; i < operand_type_and_shapes_.size(); ++i) {
tensorflow::Tensor t = GetTensor(operand_type_and_shapes_[i], inputs[i]);
RefCountHandle handle(tensorflow::down_cast<tensorflow::TensorHandle*>(
ctx_->CreateLocalHandleFromTFTensor(t, nullptr)));
TF_RETURN_IF_ERROR(op->AddInput(handle.get()));
}
int num_outputs = result_type_and_shapes_.size();
absl::FixedArray<tensorflow::AbstractTensorHandle*> output_raw_handles(
num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::MakeSpan(output_raw_handles), &num_outputs));
std::vector<RefCountHandle> output_handles;
output_handles.reserve(num_outputs);
for (auto* output_raw_handle : output_raw_handles) {
output_handles.emplace_back(
tensorflow::down_cast<tensorflow::TensorHandle*>(output_raw_handle));
}
if (result_type_and_shapes_.size() != num_outputs) {
return absl::InternalError(absl::StrCat(
"TF host callback invocation expected ", result_type_and_shapes_.size(),
" results, instead got ", num_outputs));
}
for (int i = 0; i < num_outputs; ++i) {
const tensorflow::Tensor* tensor;
TF_RETURN_IF_ERROR(output_handles[i]->Tensor(&tensor));
CopyToBuffer(outputs[i], *tensor);
}
return absl::OkStatus();
} | TEST(TfHostCallbackTest, Simple) {
ASSERT_OK_AND_ASSIGN(auto function_defs,
MakeAddOneWithCallFunctionDef("main"));
auto in = AsTensor<float>({2.5f}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back({.dtype = in.dtype(), .shape = in.shape()});
auto out = AsTensor<float>({0.0f}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back({.dtype = out.dtype(), .shape = out.shape()});
ASSERT_OK_AND_ASSIGN(auto device_mgr, CreateTfStaticDeviceMgr());
ASSERT_OK_AND_ASSIGN(auto tf_host_callback,
tensorflow::ifrt_serving::TfHostCallback::Create(
function_defs, "main", in_dtype_shapes,
out_dtype_shapes, device_mgr.get()));
ASSERT_OK(tf_host_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out,
TensorEq(AsTensor<float>({3.5f}, tensorflow::TensorShape({1}))));
}
// Two callbacks sharing one device manager must see the same resource
// variable: one callback assigns it, the other increments and returns it.
TEST(TfHostCallbackTest, SharedState) {
  tensorflow::ConfigProto session_config;
  ASSERT_OK_AND_ASSIGN(auto state, CreateTfStaticDeviceMgr());
  std::unique_ptr<TfHostCallback> assign_callback;
  {
    ASSERT_OK_AND_ASSIGN(auto functions, MakeAssignVarFunctionDef("main"));
    std::vector<DtypeAndShape> in_dtype_shapes;
    in_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    std::vector<DtypeAndShape> out_dtype_shapes;
    ASSERT_OK_AND_ASSIGN(
        assign_callback,
        TfHostCallback::Create({functions}, "main", in_dtype_shapes,
                               out_dtype_shapes, state.get()));
  }
  std::unique_ptr<TfHostCallback> incr_callback;
  {
    ASSERT_OK_AND_ASSIGN(auto functions, MakeAddVarFunctionDef("main"));
    std::vector<DtypeAndShape> in_dtype_shapes;
    in_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    std::vector<DtypeAndShape> out_dtype_shapes;
    out_dtype_shapes.push_back(
        {.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
    ASSERT_OK_AND_ASSIGN(
        incr_callback,
        TfHostCallback::Create({functions}, "main", in_dtype_shapes,
                               out_dtype_shapes, state.get()));
  }
  constexpr int32_t kInit = 2;
  {
    auto in = AsTensor<int32_t>({kInit}, tensorflow::TensorShape({1}));
    void* in_ptrs[1] = {in.data()};
    // The assign callback has no outputs. The original code declared
    // `void* out_ptrs[0]`, a zero-length array that is ill-formed ISO C++
    // (GNU extension); pass a null pointer instead.
    void** out_ptrs = nullptr;
    ASSERT_OK(assign_callback->Call(in_ptrs, out_ptrs));
  }
  for (int i = 0; i < 3; ++i) {
    auto in = AsTensor<int32_t>({1}, tensorflow::TensorShape({1}));
    void* in_ptrs[1] = {in.data()};
    auto out = AsTensor<int32_t>({0}, tensorflow::TensorShape({1}));
    void* out_ptrs[1] = {out.data()};
    ASSERT_OK(incr_callback->Call(in_ptrs, out_ptrs));
    // Each iteration adds 1 to the shared variable initialized to kInit.
    EXPECT_THAT(out, TensorEq(AsTensor<int32_t>({kInit + i + 1},
                                                tensorflow::TensorShape({1}))));
  }
}
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
// Duplicates the bytes backing `input` so a detached thread can own them.
// Returns the owning byte vector together with a MemoryAllocation viewing
// it; moving the vector keeps its data() pointer valid, so the pair stays
// internally consistent. Returns {nullptr, empty} when `input` is null.
std::pair<std::unique_ptr<Allocation>, std::vector<uint8_t>> CopyModel(
    const Allocation* input, ErrorReporter* error_reporter) {
  if (input == nullptr) {
    return {nullptr, {}};
  }
  const uint8_t* begin = static_cast<const uint8_t*>(input->base());
  std::vector<uint8_t> buffer(begin, begin + input->bytes());
  auto view = std::make_unique<MemoryAllocation>(buffer.data(), buffer.size(),
                                                 error_reporter);
  return {std::move(view), std::move(buffer)};
}
// RAII owner of a duplicated file descriptor: closes it on destruction.
// Movable but not copyable — exactly one holder may own a descriptor.
class FdHolder {
 public:
  explicit FdHolder(int fd) : fd_(fd) {}

  // The previously `= default`ed move operations copied fd_, leaving both
  // the source and the destination holding (and eventually closing) the
  // same descriptor — a double close. Transfer ownership explicitly and
  // invalidate the moved-from holder.
  FdHolder(FdHolder&& other) noexcept : fd_(std::exchange(other.fd_, -1)) {}
  FdHolder& operator=(FdHolder&& other) noexcept {
    if (this != &other) {
      if (fd_ > 0) {
        close(fd_);
      }
      fd_ = std::exchange(other.fd_, -1);
    }
    return *this;
  }
  FdHolder(const FdHolder&) = delete;
  FdHolder& operator=(const FdHolder&) = delete;

  ~FdHolder() {
    // NOTE: fd 0 is treated as "not owned" (original behavior preserved).
    if (fd_ > 0) {
      close(fd_);
    }
  }

 private:
  int fd_;
};
std::unique_ptr<FdHolder> UpdateModelPathIfUsingFd(std::string& model_path) {
if (!absl::StartsWith(model_path, "fd:")) {
return nullptr;
}
std::vector<std::string> parts = absl::StrSplit(model_path, ':');
int model_fd;
if (!absl::SimpleAtoi(parts[1], &model_fd)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to parse file descriptor %s from model_path %s",
parts[1].c_str(), model_path.c_str());
return nullptr;
}
int new_fd = dup(model_fd);
if (new_fd < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Failed to dup() file descriptor. Original fd: %d errno: %d", model_fd,
errno);
return nullptr;
}
parts[1] = std::to_string(new_fd);
model_path = absl::StrJoin(parts, ":");
return std::make_unique<FdHolder>(new_fd);
}
}
// Validates preconditions and loads everything needed to run validation:
// the model, optional custom validation inputs, the NNAPI/GPU helper
// modules, and the validation entrypoint. Must succeed before
// TriggerValidationAsync is called.
MinibenchmarkStatus ValidatorRunnerImpl::Init() {
  if (storage_path_.empty()) {
    TF_LITE_REPORT_ERROR(error_reporter_, "storage_path is empty.");
    return kMinibenchmarkPreconditionNotMet;
  }
  if (data_directory_path_.empty()) {
    TF_LITE_REPORT_ERROR(error_reporter_, "data_directory_path is empty.");
    return kMinibenchmarkPreconditionNotMet;
  }
  if (benchmark_evaluator_ == nullptr) {
    TF_LITE_REPORT_ERROR(error_reporter_, "benchmark_evaluator is null.");
    return kMinibenchmarkPreconditionNotMet;
  }
  MinibenchmarkStatus status = storage_.Read();
  if (status != kMinibenchmarkSuccess) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed.");
    return status;
  }
  // The model may be referenced by path, fd, or buffer; the loader hides
  // the difference.
  std::unique_ptr<tools::ModelLoader> model_loader =
      tools::CreateModelLoaderFromPath(fd_or_model_path_);
  if (!model_loader) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Failed to parse model path.");
    return kMinibenchmarkPreconditionNotMet;
  }
  if (!model_loader->Init() || !model_loader->GetModel()) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Could not load model.");
    return kMinibenchmarkModelInitFailed;
  }
  if (custom_validation_embedder_) {
    // Embed the caller-provided golden inputs into a copy of the model; the
    // benchmark then runs against model_with_custom_input_.
    status = custom_validation_embedder_->BuildModel(
        *model_loader->GetModel()->GetModel(), model_with_custom_input_);
    if (status != kMinibenchmarkSuccess) {
      TF_LITE_REPORT_ERROR(error_reporter_,
                           "Failed to embed golden input to model: %d",
                           static_cast<int>(status));
      return status;
    }
    model_allocation_ = std::make_unique<MemoryAllocation>(
        model_with_custom_input_.GetBufferPointer(),
        model_with_custom_input_.GetSize(), error_reporter_);
  } else if (model_loader->type() ==
             tools::ModelLoader::Type::kBufferModelLoader) {
    // Buffer-backed models have no path to pass to the child process, so
    // keep an allocation we can copy and hand over.
    const Allocation* alloc = model_loader->GetModel()->allocation();
    if (!alloc || !alloc->valid() || !alloc->base() || alloc->bytes() <= 0) {
      TF_LITE_REPORT_ERROR(error_reporter_,
                           "Internal error: BufferModelLoader doesn't have a "
                           "valid allocation.");
      return kMinibenchmarkPreconditionNotMet;
    }
    model_allocation_ = std::make_unique<MemoryAllocation>(
        alloc->base(), alloc->bytes(), error_reporter_);
  }
  status = nnapi_helper_.Load();
  if (status != kMinibenchmarkSuccess) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load NNAPI SL: %d",
                         static_cast<int>(status));
    return status;
  }
  status = gpu_helper_.Load();
  if (status != kMinibenchmarkSuccess) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load GPU Module: %d",
                         static_cast<int>(status));
    return status;
  }
  status = validation_entrypoint_helper_.Validate();
  if (status != kMinibenchmarkSuccess) {
    return status;
  }
  // Dry-run the process runner setup so failures surface here rather than
  // inside the detached validation thread.
  ProcessRunner check_runner(data_directory_path_,
                             validation_entrypoint_helper_.name().c_str(),
                             validation_entrypoint_helper_.LoadEntrypoint(),
                             timeout_ms_, error_reporter_);
  status = check_runner.Init();
  if (status != kMinibenchmarkSuccess) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Runner::Init returned %d",
                         static_cast<int>(status));
    return status;
  }
  return kMinibenchmarkSuccess;
}
// Launches a detached thread that runs each TFLiteSettings configuration
// through the validation entrypoint, recording START/ERROR events into the
// flatbuffer storage at `storage_path`.
//
// Everything the thread needs is captured BY VALUE, since `this` may be
// destroyed while the detached thread is still running.
void ValidatorRunnerImpl::TriggerValidationAsync(
    std::vector<FlatBufferBuilder> tflite_settings,
    absl::string_view storage_path) {
  if (tflite_settings.empty()) {
    return;
  }
  storage_ = FlatbufferStorage<BenchmarkEvent>(storage_path, error_reporter_);
  std::thread detached_thread(
      [original_model_path = fd_or_model_path_,
       storage_path = std::string(storage_path),
       data_directory_path = data_directory_path_,
       tflite_settings = std::move(tflite_settings),
       // BUG FIX: previously this captured name().c_str() — a raw pointer
       // into a string owned by `this` (or a temporary), which the detached
       // thread could dereference after `this` was destroyed. Capture an
       // owned copy instead.
       validation_entrypoint_name =
           std::string(validation_entrypoint_helper_.name()),
       validation_entrypoint = validation_entrypoint_helper_.LoadEntrypoint(),
       nnapi_sl_path = nnapi_helper_.nnapi_sl_path(),
       gpu_so_path = gpu_helper_.gpu_so_path(),
       allocation_and_model =
           CopyModel(model_allocation_.get(), error_reporter_),
       timeout_ms = timeout_ms_]() {
        // Only one parent may drive validation for this storage file at a
        // time; if another holds the lock, silently bail out.
        FileLock lock(absl::StrCat(storage_path, ".parent_lock"));
        if (!lock.TryLock()) {
          return;
        }
        std::string model_path = original_model_path;
        // Keep the dup()ed fd (if any) alive for the whole loop.
        std::unique_ptr<FdHolder> fd_holder =
            UpdateModelPathIfUsingFd(model_path);
        for (auto& one_setting : tflite_settings) {
          FlatbufferStorage<BenchmarkEvent> storage(storage_path);
          TFLiteSettingsT tflite_settings_obj;
          flatbuffers::GetRoot<TFLiteSettings>(one_setting.GetBufferPointer())
              ->UnPackTo(&tflite_settings_obj);
          TFLITE_LOG_PROD(TFLITE_LOG_INFO,
                          "Run validation with entry point '%s' %s",
                          validation_entrypoint_name.c_str(),
                          storage_path.c_str());
          ProcessRunner runner(data_directory_path,
                               validation_entrypoint_name.c_str(),
                               validation_entrypoint, timeout_ms);
          int exitcode = 0;
          int signal = 0;
          // Record a START event before launching; a missing matching END
          // later indicates a crash or timeout.
          MinibenchmarkStatus status = runner.Init();
          if (status == kMinibenchmarkSuccess) {
            flatbuffers::FlatBufferBuilder fbb;
            status = storage.Append(
                &fbb,
                CreateBenchmarkEvent(
                    fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
                    BenchmarkEventType_START, 0, 0,
                    Validator::BootTimeMicros(), Validator::WallTimeMicros()));
          }
          if (status != kMinibenchmarkSuccess) {
            flatbuffers::FlatBufferBuilder fbb;
            storage.Append(
                &fbb,
                CreateBenchmarkEvent(
                    fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
                    BenchmarkEventType_ERROR, 0,
                    CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
                                         exitcode, signal, {},
                                         status),
                    Validator::BootTimeMicros(), Validator::WallTimeMicros()));
            continue;
          }
          // When the model is passed as an in-memory allocation, the runner
          // transfers it directly and no path argument is needed.
          std::vector<std::string> args;
          if (!allocation_and_model.first) {
            args.push_back(model_path);
          }
          args.push_back(storage_path);
          args.push_back(data_directory_path);
          if (tflite_settings_obj.delegate == tflite::Delegate_NNAPI &&
              !nnapi_sl_path.empty()) {
            TFLITE_LOG_PROD(
                TFLITE_LOG_INFO,
                "Running benchmark using NNAPI support library at path '%s'",
                nnapi_sl_path.c_str());
            args.push_back(nnapi_sl_path);
          } else if (tflite_settings_obj.delegate == tflite::Delegate_GPU &&
                     !gpu_so_path.empty()) {
            TFLITE_LOG_PROD(
                TFLITE_LOG_INFO,
                "Running benchmark using GPU Delegate Module at path '%s'",
                gpu_so_path.c_str());
            args.push_back(gpu_so_path);
          }
          std::string output;
          status = runner.Run(allocation_and_model.first.get(), args, &output,
                              &exitcode, &signal);
          if (status != kMinibenchmarkSuccess) {
            std::cout << "Run() returned " << status << std::endl;
            flatbuffers::FlatBufferBuilder fbb;
            storage.Append(
                &fbb,
                CreateBenchmarkEvent(
                    fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
                    BenchmarkEventType_ERROR, 0,
                    CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, exitcode,
                                         signal, {}, status),
                    Validator::BootTimeMicros(), Validator::WallTimeMicros()));
          }
        }
      });
  detached_thread.detach();
}
std::vector<const BenchmarkEvent*>
ValidatorRunnerImpl::GetSuccessfulResultsFromStorage() {
std::vector<const BenchmarkEvent*> results;
storage_.Read();
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Benchmark event(%d).",
event->event_type());
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
results.push_back(event);
} else if (event->event_type() == BenchmarkEventType_ERROR) {
TFLITE_LOG(
TFLITE_LOG_WARNING,
"Benchmark event failed with error code (%d), signal (%d), exit code "
"(%d), stage (%d), mini benchmark error code (%d).\n",
event->error()->error_code(), event->error()->signal(),
event->error()->exit_code(), event->error()->stage(),
event->error()->mini_benchmark_error_code());
}
}
return results;
}
std::vector<FlatBufferBuilder> ValidatorRunnerImpl::GetCompletedResults() {
storage_.Read();
std::vector<FlatBufferBuilder> results;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() != BenchmarkEventType_ERROR &&
event->event_type() != BenchmarkEventType_END) {
continue;
}
BenchmarkEventT event_obj;
event->UnPackTo(&event_obj);
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
event_obj.result->ok = true;
}
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(fbb, &event_obj));
results.emplace_back(std::move(fbb));
}
return results;
}
// Re-reads storage and counts terminal events: every ERROR, plus every END
// that carries a result.
int ValidatorRunnerImpl::GetNumCompletedResults() {
  storage_.Read();
  int completed = 0;
  for (int i = 0; i < storage_.Count(); i++) {
    const BenchmarkEvent* event = storage_.Get(i);
    const bool is_error = event->event_type() == BenchmarkEventType_ERROR;
    const bool is_end_with_result =
        event->event_type() == BenchmarkEventType_END && event->result();
    if (is_error || is_end_with_result) {
      ++completed;
    }
  }
  return completed;
}
// Confirms the validation entrypoint symbol can be resolved in the current
// process. Always fails on Windows, where dlsym is unavailable.
MinibenchmarkStatus
ValidatorRunnerImpl::ValidationEntrypointHelper::Validate() {
#ifndef _WIN32
  if (!LoadEntrypoint()) {
    TF_LITE_REPORT_ERROR(error_reporter_, "Could not load symbol '%s': '%s'",
                         validation_entrypoint_name_.c_str(), dlerror());
    return kMinibenchmarkValidationEntrypointSymbolNotFound;
  }
  return kMinibenchmarkSuccess;
#else
  return kMinibenchmarkUnsupportedPlatform;
#endif
}
// Resolves the entrypoint symbol from the process's global symbol table.
// Returns nullptr when the symbol is absent, and always on Windows (the
// trailing return is only reachable when _WIN32 is defined).
ValidatorRunnerImpl::ValidationEntrypointHelper::EntrypointFunc*
ValidatorRunnerImpl::ValidationEntrypointHelper::LoadEntrypoint() {
#ifndef _WIN32
  return reinterpret_cast<int (*)(int, char**)>(
      dlsym(RTLD_DEFAULT, validation_entrypoint_name_.c_str()));
#endif
  return nullptr;
}
// If an NNAPI support library was supplied, recovers the path of the shared
// object backing it (via dladdr on one of its function pointers) so the
// path can be forwarded to the validation subprocess. A null nnapi_sl_ is
// not an error — NNAPI is simply not used.
MinibenchmarkStatus ValidatorRunnerImpl::NnapiHelper::Load() {
  if (nnapi_sl_) {
#ifndef _WIN32
    Dl_info dl_info;
    // Any symbol from the library works for dladdr; this one must be set.
    if (!nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel) {
      return kMiniBenchmarkCannotLoadSupportLibrary;
    }
    int status = dladdr(reinterpret_cast<void*>(
                            nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel),
                        &dl_info);
    if (status == 0 || !dl_info.dli_fname) {
      return kMiniBenchmarkCannotLoadSupportLibrary;
    }
    nnapi_sl_path_ = dl_info.dli_fname;
#else
    return kMinibenchmarkUnsupportedPlatform;
#endif
  }
  return kMinibenchmarkSuccess;
}
// If a GPU plugin handle was supplied, recovers the path of the shared
// object backing it (via dladdr) so it can be forwarded to the validation
// subprocess. A null handle is not an error — the GPU module is not used.
//
// Structure fix: the original duplicated the if-closing brace inside both
// sides of the #ifndef/#else, which compiled only because each preprocessor
// branch happened to balance on its own. Normalized to the same shape as
// NnapiHelper::Load (single brace outside the conditional); behavior on
// both platforms is unchanged.
MinibenchmarkStatus ValidatorRunnerImpl::GpuHelper::Load() {
  if (gpu_plugin_handle_) {
#ifndef _WIN32
    Dl_info dl_info;
    int status = dladdr(gpu_plugin_handle_, &dl_info);
    if (status == 0 || !dl_info.dli_fname) {
      return kMinibenchmarkCannotLoadGpuModule;
    }
    gpu_so_path_ = dl_info.dli_fname;
#else
    return kMinibenchmarkUnsupportedPlatform;
#endif
  }
  return kMinibenchmarkSuccess;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
// Evaluator stub that accepts every benchmark result, letting tests assert
// on the plumbing without depending on real accuracy checks.
class AlwaysTrueEvaluator : public AbstractBenchmarkResultEvaluator {
 public:
  bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
    return true;
  }
};
// Fixture that stages the embedded test models and a fake NNAPI support
// library into temp files and prepares default ValidatorRunnerOptions.
// Tests toggle individual options before calling CreateValidator().
class ValidatorRunnerImplTest : public ::testing::Test {
 protected:
  void SetUp() override {
    MiniBenchmarkTestHelper helper;
    // On platforms where the mini-benchmark can't run, tests self-skip.
    should_perform_test_ = helper.should_perform_test();
    nnapi_sl_dump_path_ = helper.DumpToTempFile(
        "libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
    options_.data_directory_path = ::testing::TempDir();
    options_.storage_path = ::testing::TempDir() + "/storage_path.fb";
    options_.validation_entrypoint_name =
        "Java_org_tensorflow_lite_acceleration_validation_entrypoint";
    options_.error_reporter = tflite::DefaultErrorReporter();
    options_.benchmark_result_evaluator =
        EmbeddedResultEvaluator::GetInstance();
    options_.per_test_timeout_ms = 0;
    // Default model has validation data embedded; plain_model_path_ is the
    // same model without it, for custom-validation tests.
    options_.model_path = helper.DumpToTempFile(
        "mobilenet_quant_with_validation.tflite",
        g_tflite_acceleration_embedded_mobilenet_validation_model,
        g_tflite_acceleration_embedded_mobilenet_validation_model_len);
    ASSERT_TRUE(!options_.model_path.empty());
    plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
        "mobilenet_quant.tflite",
        g_tflite_acceleration_embedded_mobilenet_model,
        g_tflite_acceleration_embedded_mobilenet_model_len);
    ASSERT_TRUE(!plain_model_path_.empty());
  }
  void TearDown() override {
    if (should_perform_test_) {
      // The storage file must exist after a performed test run.
      ASSERT_EQ(unlink(options_.storage_path.c_str()), 0);
    }
  }
  // Builds a runner from the current options_. Note this consumes
  // custom_validation_embedder_ (moved), so call at most once per test.
  ValidatorRunnerImpl CreateValidator() {
    return ValidatorRunnerImpl(
        CreateModelLoaderPath(options_), options_.storage_path,
        options_.data_directory_path, options_.per_test_timeout_ms,
        std::move(custom_validation_embedder_), options_.error_reporter,
        options_.nnapi_sl, options_.gpu_plugin_handle,
        options_.validation_entrypoint_name,
        options_.benchmark_result_evaluator);
  }
  bool should_perform_test_;
  ValidatorRunnerOptions options_{};
  std::string plain_model_path_;
  std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder_ =
      nullptr;
  std::string nnapi_sl_dump_path_;
};
// End-to-end: validation with the fake NNAPI SL and the model's embedded
// validation data succeeds, and the fake SL is actually invoked.
TEST_F(ValidatorRunnerImplTest,
       GetSuccessfulResultsSucceedWithNnApiSlAndEmbeddedValidation) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  AndroidInfo android_info;
  auto status = RequestAndroidInfo(&android_info);
  ASSERT_TRUE(status.ok());
  InitNnApiSlInvocationStatus();
  std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> fake_nnapi_sl =
      ::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_dump_path_);
  ASSERT_THAT(fake_nnapi_sl.get(), ::testing::NotNull());
  options_.nnapi_sl = fake_nnapi_sl->getFL5();
  ValidatorRunnerImpl validator = CreateValidator();
  ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
  std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
  tflite_settings[0].Finish(
      CreateTFLiteSettings(tflite_settings[0], Delegate_NNAPI,
                           CreateNNAPISettings(tflite_settings[0])));
  validator.TriggerValidationAsync(std::move(tflite_settings),
                                   options_.storage_path);
  FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
                                            options_.error_reporter);
  // Validation runs on a detached thread; poll until a result lands.
  while (validator.GetNumCompletedResults() < 1) {
    usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
  }
  std::vector<const BenchmarkEvent*> results =
      validator.GetSuccessfulResultsFromStorage();
  ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
  for (auto& result : results) {
    ASSERT_THAT(result, testing::Property(&BenchmarkEvent::event_type,
                                          testing::Eq(BenchmarkEventType_END)));
    // Embedded validation does not forward raw outputs.
    EXPECT_THAT(result->result()->actual_output(),
                testing::Pointee(testing::SizeIs(0)));
  }
  EXPECT_TRUE(WasNnApiSlInvoked());
}
// End-to-end: a buffer-backed (pathless) model with caller-supplied custom
// validation inputs completes, and the raw outputs are forwarded.
TEST_F(ValidatorRunnerImplTest,
       GetSuccessfulResultsSucceedWithBufferModelAndCustomValidation) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  // Supply the model as an in-memory buffer instead of a file path.
  options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_model;
  options_.model_size = g_tflite_acceleration_embedded_mobilenet_model_len;
  options_.model_path.clear();
  int batch_size = 3;
  custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
      batch_size, std::vector<std::vector<uint8_t>>{
                      std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
  AlwaysTrueEvaluator evaluator;
  options_.benchmark_result_evaluator = &evaluator;
  ValidatorRunnerImpl validator = CreateValidator();
  ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
  std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
  tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
  validator.TriggerValidationAsync(std::move(tflite_settings),
                                   options_.storage_path);
  FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
                                            options_.error_reporter);
  // Validation runs on a detached thread; poll until a result lands.
  while (validator.GetNumCompletedResults() < 1) {
    usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
  }
  std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
  ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
  for (auto& result : results) {
    const BenchmarkEvent* event =
        GetRoot<BenchmarkEvent>(result.GetBufferPointer());
    ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
                                         testing::Eq(BenchmarkEventType_END)));
    EXPECT_TRUE(event->result()->ok());
    // Custom validation forwards one output tensor of batch_size * 1001
    // class scores.
    EXPECT_THAT(event->result()->actual_output(),
                testing::Pointee(testing::SizeIs(1)));
    EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
                testing::Pointee(testing::SizeIs(batch_size * 1001)));
  }
}
// End-to-end: custom validation over a path-based plain model is marked ok
// when the evaluator accepts every result.
TEST_F(ValidatorRunnerImplTest,
       GetCompletedResultsReturnsOkWithCustomValidation) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  int batch_size = 3;
  custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
      batch_size, std::vector<std::vector<uint8_t>>{
                      std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
  options_.model_path = plain_model_path_;
  AlwaysTrueEvaluator evaluator;
  options_.benchmark_result_evaluator = &evaluator;
  ValidatorRunnerImpl validator = CreateValidator();
  ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
  std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
  tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
  validator.TriggerValidationAsync(std::move(tflite_settings),
                                   options_.storage_path);
  FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
                                            options_.error_reporter);
  // Validation runs on a detached thread; poll until a result lands.
  while (validator.GetNumCompletedResults() < 1) {
    usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
  }
  std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
  ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
  for (auto& result : results) {
    const BenchmarkEvent* event =
        GetRoot<BenchmarkEvent>(result.GetBufferPointer());
    ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
                                         testing::Eq(BenchmarkEventType_END)));
    EXPECT_TRUE(event->result()->ok());
    EXPECT_THAT(event->result()->actual_output(),
                testing::Pointee(testing::SizeIs(1)));
    EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
                testing::Pointee(testing::SizeIs(batch_size * 1001)));
  }
}
// End-to-end: with the default (real) evaluator, custom validation on
// all-ones inputs completes but is reported as not ok; outputs are still
// forwarded.
TEST_F(ValidatorRunnerImplTest,
       GetCompletedResultsReturnsNotOkIfCustomValidationFailed) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  int batch_size = 3;
  custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
      batch_size, std::vector<std::vector<uint8_t>>{
                      std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
  options_.model_path = plain_model_path_;
  ValidatorRunnerImpl validator = CreateValidator();
  ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
  std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
  tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
  validator.TriggerValidationAsync(std::move(tflite_settings),
                                   options_.storage_path);
  FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
                                            options_.error_reporter);
  // Validation runs on a detached thread; poll until a result lands.
  while (validator.GetNumCompletedResults() < 1) {
    usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
  }
  std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
  ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
  for (auto& result : results) {
    const BenchmarkEvent* event =
        GetRoot<BenchmarkEvent>(result.GetBufferPointer());
    ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
                                         testing::Eq(BenchmarkEventType_END)));
    EXPECT_FALSE(event->result()->ok());
    EXPECT_THAT(event->result()->actual_output(),
                testing::Pointee(testing::SizeIs(1)));
    EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
                testing::Pointee(testing::SizeIs(batch_size * 1001)));
  }
}
// An NNAPI support-library struct with no real handle behind it must make
// Init() fail with kMiniBenchmarkCannotLoadSupportLibrary.
TEST_F(ValidatorRunnerImplTest, FailIfItCannotFindNnApiSlPath) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  NnApiSLDriverImplFL5 empty_nnapi_sl{};
  options_.nnapi_sl = &empty_nnapi_sl;
  EXPECT_EQ(CreateValidator().Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
// A symbol name that cannot be resolved in the entrypoint library must make
// Init() fail with the symbol-not-found code.
TEST_F(ValidatorRunnerImplTest, FailWithInvalidEntrypoint) {
  options_.validation_entrypoint_name = "invalid_name()";
  ValidatorRunnerImpl validator = CreateValidator();
  EXPECT_EQ(validator.Init(),
            kMinibenchmarkValidationEntrypointSymbolNotFound);
}
// A model path that does not exist on disk must make Init() fail with the
// model-initialization error code.
TEST_F(ValidatorRunnerImplTest, FailIfCannotLoadModel) {
  options_.model_path = "invalid/path";
  ValidatorRunnerImpl validator = CreateValidator();
  EXPECT_EQ(validator.Init(), kMinibenchmarkModelInitFailed);
}
// Supplying two (empty) custom-validation input buffers — presumably more than
// the model's input count — must make the validation subgraph build fail.
TEST_F(ValidatorRunnerImplTest, FailIfCannotEmbedInputData) {
  options_.model_path = plain_model_path_;
  std::vector<std::vector<uint8_t>> mismatched_inputs(2);
  custom_validation_embedder_ =
      std::make_unique<CustomValidationEmbedder>(1, mismatched_inputs);
  EXPECT_EQ(CreateValidator().Init(),
            kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} | std::vector<const BenchmarkEvent*>
ValidatorRunnerImpl::GetSuccessfulResultsFromStorage() {
std::vector<const BenchmarkEvent*> results;
storage_.Read();
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Benchmark event(%d).",
event->event_type());
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
results.push_back(event);
} else if (event->event_type() == BenchmarkEventType_ERROR) {
TFLITE_LOG(
TFLITE_LOG_WARNING,
"Benchmark event failed with error code (%d), signal (%d), exit code "
"(%d), stage (%d), mini benchmark error code (%d).\n",
event->error()->error_code(), event->error()->signal(),
event->error()->exit_code(), event->error()->stage(),
event->error()->mini_benchmark_error_code());
}
}
return results;
} | TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithNnApiSlAndEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> fake_nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_dump_path_);
ASSERT_THAT(fake_nnapi_sl.get(), ::testing::NotNull());
options_.nnapi_sl = fake_nnapi_sl->getFL5();
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(
CreateTFLiteSettings(tflite_settings[0], Delegate_NNAPI,
CreateNNAPISettings(tflite_settings[0])));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<const BenchmarkEvent*> results =
validator.GetSuccessfulResultsFromStorage();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
ASSERT_THAT(result, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_THAT(result->result()->actual_output(),
testing::Pointee(testing::SizeIs(0)));
}
EXPECT_TRUE(WasNnApiSlInvoked());
}
// Loading the model from an in-memory buffer (model_path cleared) with
// embedded custom validation and an always-true evaluator must yield an ok
// END event with fully-sized outputs.
TEST_F(ValidatorRunnerImplTest,
       GetSuccessfulResultsSucceedWithBufferModelAndCustomValidation) {
  if (!should_perform_test_) {
    std::cerr << "Skipping test";
    return;
  }
  // Use the embedded mobilenet buffer instead of a file path.
  options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_model;
  options_.model_size = g_tflite_acceleration_embedded_mobilenet_model_len;
  options_.model_path.clear();
  // Batch of 3 all-ones uint8 inputs (224x224x3 image shape).
  int batch_size = 3;
  custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
      batch_size, std::vector<std::vector<uint8_t>>{
                      std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
  // Force the run to be considered successful regardless of outputs.
  AlwaysTrueEvaluator evaluator;
  options_.benchmark_result_evaluator = &evaluator;
  ValidatorRunnerImpl validator = CreateValidator();
  ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
  std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
  tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
  validator.TriggerValidationAsync(std::move(tflite_settings),
                                   options_.storage_path);
  FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
                                            options_.error_reporter);
  // Busy-wait until the asynchronous validation run has produced a result.
  while (validator.GetNumCompletedResults() < 1) {
    usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
  }
  std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
  ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
  for (auto& result : results) {
    const BenchmarkEvent* event =
        GetRoot<BenchmarkEvent>(result.GetBufferPointer());
    ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
                                         testing::Eq(BenchmarkEventType_END)));
    EXPECT_TRUE(event->result()->ok());
    // NOTE(review): 1001 is presumably the model's per-entry output size —
    // confirm against the embedded model.
    EXPECT_THAT(event->result()->actual_output(),
                testing::Pointee(testing::SizeIs(1)));
    EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
                testing::Pointee(testing::SizeIs(batch_size * 1001)));
  }
}
#include "runtime/standard/type_conversion_functions.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/overflow.h"
#include "internal/status_macros.h"
#include "internal/time.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::cel::internal::EncodeDurationToJson;
using ::cel::internal::EncodeTimestampToJson;
using ::cel::internal::MaxTimestamp;
const absl::Time kMaxTime = MaxTimestamp();
// Registers the int(...) conversion overloads: bool, double (range-checked),
// int (identity), string (parsed), timestamp (epoch seconds), and uint
// (range-checked). Returns the first registration error, if any.
absl::Status RegisterIntConversionFunctions(FunctionRegistry& registry,
                                            const RuntimeOptions&) {
  // int(bool) -> 0 or 1.
  absl::Status status =
      UnaryFunctionAdapter<int64_t, bool>::RegisterGlobalOverload(
          cel::builtin::kInt,
          [](ValueManager&, bool v) { return static_cast<int64_t>(v); },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // int(double): checked conversion; out-of-range doubles produce an error
  // value instead of wrapping.
  status = UnaryFunctionAdapter<Value, double>::RegisterGlobalOverload(
      cel::builtin::kInt,
      [](ValueManager& value_factory, double v) -> Value {
        auto conv = cel::internal::CheckedDoubleToInt64(v);
        if (!conv.ok()) {
          return value_factory.CreateErrorValue(conv.status());
        }
        return value_factory.CreateIntValue(*conv);
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // int(int): identity.
  status = UnaryFunctionAdapter<int64_t, int64_t>::RegisterGlobalOverload(
      cel::builtin::kInt, [](ValueManager&, int64_t v) { return v; }, registry);
  CEL_RETURN_IF_ERROR(status);
  // int(string): decimal parse; a non-numeric string yields an error value.
  status =
      UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
          cel::builtin::kInt,
          [](ValueManager& value_factory, const StringValue& s) -> Value {
            int64_t result;
            if (!absl::SimpleAtoi(s.ToString(), &result)) {
              return value_factory.CreateErrorValue(
                  absl::InvalidArgumentError("cannot convert string to int"));
            }
            return value_factory.CreateIntValue(result);
          },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // int(timestamp): whole seconds since the Unix epoch.
  status = UnaryFunctionAdapter<int64_t, absl::Time>::RegisterGlobalOverload(
      cel::builtin::kInt,
      [](ValueManager&, absl::Time t) { return absl::ToUnixSeconds(t); },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // int(uint): checked conversion; values above int64 max produce an error.
  return UnaryFunctionAdapter<Value, uint64_t>::RegisterGlobalOverload(
      cel::builtin::kInt,
      [](ValueManager& value_factory, uint64_t v) -> Value {
        auto conv = cel::internal::CheckedUint64ToInt64(v);
        if (!conv.ok()) {
          return value_factory.CreateErrorValue(conv.status());
        }
        return value_factory.CreateIntValue(*conv);
      },
      registry);
}
// Registers the string(...) conversion overloads (bytes, double, int,
// string identity, uint, duration, timestamp). No-op when string
// conversions are disabled in the runtime options.
absl::Status RegisterStringConversionFunctions(FunctionRegistry& registry,
                                               const RuntimeOptions& options) {
  if (!options.enable_string_conversion) {
    return absl::OkStatus();
  }
  // string(bytes): CreateStringValue may reject the payload (e.g. invalid
  // content — presumably UTF-8 validation; confirm in ValueManager), in
  // which case an error value is returned.
  absl::Status status =
      UnaryFunctionAdapter<Value, const BytesValue&>::RegisterGlobalOverload(
          cel::builtin::kString,
          [](ValueManager& value_factory, const BytesValue& value) -> Value {
            auto handle_or = value_factory.CreateStringValue(value.ToString());
            if (!handle_or.ok()) {
              return value_factory.CreateErrorValue(handle_or.status());
            }
            return *handle_or;
          },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // string(double): decimal formatting via absl::StrCat.
  status = UnaryFunctionAdapter<StringValue, double>::RegisterGlobalOverload(
      cel::builtin::kString,
      [](ValueManager& value_factory, double value) -> StringValue {
        return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // string(int).
  status = UnaryFunctionAdapter<StringValue, int64_t>::RegisterGlobalOverload(
      cel::builtin::kString,
      [](ValueManager& value_factory, int64_t value) -> StringValue {
        return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // string(string): identity.
  status =
      UnaryFunctionAdapter<StringValue, StringValue>::RegisterGlobalOverload(
          cel::builtin::kString,
          [](ValueManager&, StringValue value) -> StringValue { return value; },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // string(uint).
  status = UnaryFunctionAdapter<StringValue, uint64_t>::RegisterGlobalOverload(
      cel::builtin::kString,
      [](ValueManager& value_factory, uint64_t value) -> StringValue {
        return value_factory.CreateUncheckedStringValue(absl::StrCat(value));
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // string(duration): JSON duration encoding.
  status = UnaryFunctionAdapter<Value, absl::Duration>::RegisterGlobalOverload(
      cel::builtin::kString,
      [](ValueManager& value_factory, absl::Duration value) -> Value {
        auto encode = EncodeDurationToJson(value);
        if (!encode.ok()) {
          return value_factory.CreateErrorValue(encode.status());
        }
        return value_factory.CreateUncheckedStringValue(*encode);
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // string(timestamp): JSON timestamp encoding.
  return UnaryFunctionAdapter<Value, absl::Time>::RegisterGlobalOverload(
      cel::builtin::kString,
      [](ValueManager& value_factory, absl::Time value) -> Value {
        auto encode = EncodeTimestampToJson(value);
        if (!encode.ok()) {
          return value_factory.CreateErrorValue(encode.status());
        }
        return value_factory.CreateUncheckedStringValue(*encode);
      },
      registry);
}
// Registers the uint(...) conversion overloads: double (range-checked),
// int (range-checked), string (parsed), and uint (identity). Returns the
// first registration error, if any.
absl::Status RegisterUintConversionFunctions(FunctionRegistry& registry,
                                             const RuntimeOptions&) {
  // uint(double): checked conversion; out-of-range doubles produce an
  // error value.
  absl::Status status =
      UnaryFunctionAdapter<Value, double>::RegisterGlobalOverload(
          cel::builtin::kUint,
          [](ValueManager& value_factory, double v) -> Value {
            auto conv = cel::internal::CheckedDoubleToUint64(v);
            if (!conv.ok()) {
              return value_factory.CreateErrorValue(conv.status());
            }
            return value_factory.CreateUintValue(*conv);
          },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // uint(int): checked conversion; negative values produce an error value.
  status = UnaryFunctionAdapter<Value, int64_t>::RegisterGlobalOverload(
      cel::builtin::kUint,
      [](ValueManager& value_factory, int64_t v) -> Value {
        auto conv = cel::internal::CheckedInt64ToUint64(v);
        if (!conv.ok()) {
          return value_factory.CreateErrorValue(conv.status());
        }
        return value_factory.CreateUintValue(*conv);
      },
      registry);
  CEL_RETURN_IF_ERROR(status);
  // uint(string): decimal parse; a non-numeric string yields an error value.
  status =
      UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
          cel::builtin::kUint,
          [](ValueManager& value_factory, const StringValue& s) -> Value {
            uint64_t result;
            if (!absl::SimpleAtoi(s.ToString(), &result)) {
              // Fix: the message previously read "doesn't convert to a
              // string", naming the wrong target type for a string->uint
              // parse failure.
              return value_factory.CreateErrorValue(
                  absl::InvalidArgumentError("doesn't convert to a uint"));
            }
            return value_factory.CreateUintValue(result);
          },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // uint(uint): identity.
  return UnaryFunctionAdapter<uint64_t, uint64_t>::RegisterGlobalOverload(
      cel::builtin::kUint, [](ValueManager&, uint64_t v) { return v; },
      registry);
}
// Registers the bytes(...) conversion overloads: bytes identity and
// string -> bytes.
absl::Status RegisterBytesConversionFunctions(FunctionRegistry& registry,
                                              const RuntimeOptions&) {
  // bytes(bytes): identity.
  absl::Status identity_status =
      UnaryFunctionAdapter<BytesValue, BytesValue>::RegisterGlobalOverload(
          cel::builtin::kBytes,
          [](ValueManager&, BytesValue value) -> BytesValue { return value; },
          registry);
  if (!identity_status.ok()) {
    return identity_status;
  }
  // bytes(string): copies the string's contents into a bytes value.
  return UnaryFunctionAdapter<absl::StatusOr<BytesValue>, const StringValue&>::
      RegisterGlobalOverload(
          cel::builtin::kBytes,
          [](ValueManager& value_factory, const StringValue& value) {
            return value_factory.CreateBytesValue(value.ToString());
          },
          registry);
}
// Registers the double(...) conversion overloads: double identity, int,
// string (parsed), and uint.
absl::Status RegisterDoubleConversionFunctions(FunctionRegistry& registry,
                                               const RuntimeOptions&) {
  // double(double): identity.
  absl::Status identity_status =
      UnaryFunctionAdapter<double, double>::RegisterGlobalOverload(
          cel::builtin::kDouble, [](ValueManager&, double v) { return v; },
          registry);
  if (!identity_status.ok()) {
    return identity_status;
  }
  // double(int): exact widening cast.
  absl::Status int_status =
      UnaryFunctionAdapter<double, int64_t>::RegisterGlobalOverload(
          cel::builtin::kDouble,
          [](ValueManager&, int64_t v) { return static_cast<double>(v); },
          registry);
  if (!int_status.ok()) {
    return int_status;
  }
  // double(string): parse; a non-numeric string yields an error value.
  absl::Status string_status =
      UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
          cel::builtin::kDouble,
          [](ValueManager& value_factory, const StringValue& s) -> Value {
            double result;
            if (!absl::SimpleAtod(s.ToString(), &result)) {
              return value_factory.CreateErrorValue(absl::InvalidArgumentError(
                  "cannot convert string to double"));
            }
            return value_factory.CreateDoubleValue(result);
          },
          registry);
  if (!string_status.ok()) {
    return string_status;
  }
  // double(uint).
  return UnaryFunctionAdapter<double, uint64_t>::RegisterGlobalOverload(
      cel::builtin::kDouble,
      [](ValueManager&, uint64_t v) { return static_cast<double>(v); },
      registry);
}
// Parses a CEL duration literal (e.g. "1h30m") into a duration value.
// Returns an error value if the string does not parse or the duration
// value cannot be created.
Value CreateDurationFromString(ValueManager& value_factory,
                               const StringValue& dur_str) {
  absl::Duration parsed;
  if (absl::ParseDuration(dur_str.ToString(), &parsed)) {
    auto duration_value = value_factory.CreateDurationValue(parsed);
    return duration_value.ok()
               ? *duration_value
               : value_factory.CreateErrorValue(duration_value.status());
  }
  return value_factory.CreateErrorValue(
      absl::InvalidArgumentError("String to Duration conversion failed"));
}
// Registers duration(string), timestamp(int) and timestamp(string)
// conversion overloads.
absl::Status RegisterTimeConversionFunctions(FunctionRegistry& registry,
                                             const RuntimeOptions& options) {
  // duration(string): delegates to CreateDurationFromString above.
  absl::Status status =
      UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
          cel::builtin::kDuration, CreateDurationFromString, registry);
  CEL_RETURN_IF_ERROR(status);
  // timestamp(int): the operand is interpreted as seconds since the Unix
  // epoch.
  status =
      UnaryFunctionAdapter<Value, int64_t>::RegisterGlobalOverload(
          cel::builtin::kTimestamp,
          [](ValueManager& value_factory, int64_t epoch_seconds) -> Value {
            return value_factory.CreateUncheckedTimestampValue(
                absl::FromUnixSeconds(epoch_seconds));
          },
          registry);
  CEL_RETURN_IF_ERROR(status);
  // Copy the flag so the lambda below captures a bool by value rather than
  // referencing `options`, whose lifetime may not cover later evaluations.
  bool enable_timestamp_duration_overflow_errors =
      options.enable_timestamp_duration_overflow_errors;
  // timestamp(string): RFC3339 parse; optionally range-checked against
  // [UniversalEpoch, kMaxTime].
  return UnaryFunctionAdapter<Value, const StringValue&>::
      RegisterGlobalOverload(
          cel::builtin::kTimestamp,
          [=](ValueManager& value_factory,
              const StringValue& time_str) -> Value {
            absl::Time ts;
            if (!absl::ParseTime(absl::RFC3339_full, time_str.ToString(), &ts,
                                 nullptr)) {
              return value_factory.CreateErrorValue(absl::InvalidArgumentError(
                  "String to Timestamp conversion failed"));
            }
            if (enable_timestamp_duration_overflow_errors) {
              if (ts < absl::UniversalEpoch() || ts > kMaxTime) {
                return value_factory.CreateErrorValue(
                    absl::OutOfRangeError("timestamp overflow"));
              }
            }
            return value_factory.CreateUncheckedTimestampValue(ts);
          },
          registry);
}
}
// Registers all standard CEL type conversion functions (bytes, double, int,
// string, uint, duration/timestamp, dyn, type) on `registry`.
absl::Status RegisterTypeConversionFunctions(FunctionRegistry& registry,
                                             const RuntimeOptions& options) {
  CEL_RETURN_IF_ERROR(RegisterBytesConversionFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterDoubleConversionFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterIntConversionFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterStringConversionFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterUintConversionFunctions(registry, options));
  CEL_RETURN_IF_ERROR(RegisterTimeConversionFunctions(registry, options));
  // dyn(value): identity conversion.
  absl::Status dyn_status =
      UnaryFunctionAdapter<Value, const Value&>::RegisterGlobalOverload(
          cel::builtin::kDyn,
          [](ValueManager&, const Value& value) -> Value { return value; },
          registry);
  if (!dyn_status.ok()) {
    return dyn_status;
  }
  // type(value): the runtime type of the operand, as a type value.
  return UnaryFunctionAdapter<Value, const Value&>::RegisterGlobalOverload(
      cel::builtin::kType,
      [](ValueManager& factory, const Value& value) {
        return factory.CreateTypeValue(value.GetType(factory));
      },
      registry);
}
} | #include "runtime/standard/type_conversion_functions.h"
#include <vector>
#include "base/builtins.h"
#include "base/function_descriptor.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::IsEmpty;
using testing::UnorderedElementsAre;
// Matches an overload entry whose descriptor has the given function name,
// receiver style, and exactly one argument of kind `expected_kind`.
MATCHER_P3(MatchesUnaryDescriptor, name, receiver, expected_kind, "") {
  const FunctionDescriptor& descriptor = arg.descriptor;
  std::vector<Kind> types{expected_kind};
  return descriptor.name() == name && descriptor.receiver_style() == receiver &&
         descriptor.types() == types;
}
// int() should accept bool, int, uint, double, string and timestamp.
TEST(RegisterTypeConversionFunctions, RegisterIntConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kInt, false, {Kind::kAny});
  EXPECT_THAT(
      overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kBool),
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kInt),
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kUint),
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kDouble),
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kString),
          MatchesUnaryDescriptor(builtin::kInt, false, Kind::kTimestamp)));
}
// uint() should accept int, uint, double and string.
TEST(RegisterTypeConversionFunctions, RegisterUintConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kUint, false, {Kind::kAny});
  EXPECT_THAT(
      overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kUint, false, Kind::kUint),
          MatchesUnaryDescriptor(builtin::kUint, false, Kind::kInt),
          MatchesUnaryDescriptor(builtin::kUint, false, Kind::kDouble),
          MatchesUnaryDescriptor(builtin::kUint, false, Kind::kString)));
}
// double() should accept int, uint, double and string.
TEST(RegisterTypeConversionFunctions, RegisterDoubleConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kDouble, false, {Kind::kAny});
  EXPECT_THAT(
      overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kDouble),
          MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kInt),
          MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kUint),
          MatchesUnaryDescriptor(builtin::kDouble, false, Kind::kString)));
}
// With string conversion enabled, string() should accept int, uint, double,
// string, bytes, duration and timestamp.
TEST(RegisterTypeConversionFunctions, RegisterStringConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  options.enable_string_conversion = true;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kString, false, {Kind::kAny});
  EXPECT_THAT(
      overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kString),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kBytes),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kInt),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kUint),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kDouble),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kDuration),
          MatchesUnaryDescriptor(builtin::kString, false, Kind::kTimestamp)));
}
// With string conversion disabled, no string() overloads are registered.
TEST(RegisterTypeConversionFunctions,
     RegisterStringConversionFunctionsDisabled) {
  FunctionRegistry registry;
  RuntimeOptions options;
  options.enable_string_conversion = false;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kString, false, {Kind::kAny});
  EXPECT_THAT(overloads, IsEmpty());
}
// bytes() should accept bytes and string.
TEST(RegisterTypeConversionFunctions, RegisterBytesConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto overloads =
      registry.FindStaticOverloads(builtin::kBytes, false, {Kind::kAny});
  EXPECT_THAT(
      overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kBytes, false, Kind::kString),
          MatchesUnaryDescriptor(builtin::kBytes, false, Kind::kBytes)));
}
// timestamp() should accept epoch seconds (int) and RFC3339 strings;
// duration() should accept strings only.
TEST(RegisterTypeConversionFunctions, RegisterTimeConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto timestamp_overloads =
      registry.FindStaticOverloads(builtin::kTimestamp, false, {Kind::kAny});
  EXPECT_THAT(
      timestamp_overloads,
      UnorderedElementsAre(
          MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kString),
          MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kInt)));
  auto duration_overloads =
      registry.FindStaticOverloads(builtin::kDuration, false, {Kind::kAny});
  EXPECT_THAT(duration_overloads,
              UnorderedElementsAre(MatchesUnaryDescriptor(
                  builtin::kDuration, false, Kind::kString)));
}
// dyn() and type() each accept any value.
TEST(RegisterTypeConversionFunctions, RegisterMetaTypeConversionFunctions) {
  FunctionRegistry registry;
  RuntimeOptions options;
  ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
  auto dyn_overloads =
      registry.FindStaticOverloads(builtin::kDyn, false, {Kind::kAny});
  EXPECT_THAT(dyn_overloads,
              UnorderedElementsAre(
                  MatchesUnaryDescriptor(builtin::kDyn, false, Kind::kAny)));
  auto type_overloads =
      registry.FindStaticOverloads(builtin::kType, false, {Kind::kAny});
  EXPECT_THAT(type_overloads,
              UnorderedElementsAre(
                  MatchesUnaryDescriptor(builtin::kType, false, Kind::kAny)));
}
}
} | absl::Status RegisterTimeConversionFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
absl::Status status =
UnaryFunctionAdapter<Value, const StringValue&>::RegisterGlobalOverload(
cel::builtin::kDuration, CreateDurationFromString, registry);
CEL_RETURN_IF_ERROR(status);
status =
UnaryFunctionAdapter<Value, int64_t>::RegisterGlobalOverload(
cel::builtin::kTimestamp,
[](ValueManager& value_factory, int64_t epoch_seconds) -> Value {
return value_factory.CreateUncheckedTimestampValue(
absl::FromUnixSeconds(epoch_seconds));
},
registry);
CEL_RETURN_IF_ERROR(status);
bool enable_timestamp_duration_overflow_errors =
options.enable_timestamp_duration_overflow_errors;
return UnaryFunctionAdapter<Value, const StringValue&>::
RegisterGlobalOverload(
cel::builtin::kTimestamp,
[=](ValueManager& value_factory,
const StringValue& time_str) -> Value {
absl::Time ts;
if (!absl::ParseTime(absl::RFC3339_full, time_str.ToString(), &ts,
nullptr)) {
return value_factory.CreateErrorValue(absl::InvalidArgumentError(
"String to Timestamp conversion failed"));
}
if (enable_timestamp_duration_overflow_errors) {
if (ts < absl::UniversalEpoch() || ts > kMaxTime) {
return value_factory.CreateErrorValue(
absl::OutOfRangeError("timestamp overflow"));
}
}
return value_factory.CreateUncheckedTimestampValue(ts);
},
registry);
} | TEST(RegisterTypeConversionFunctions, RegisterTimeConversionFunctions) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterTypeConversionFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kTimestamp, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kInt),
MatchesUnaryDescriptor(builtin::kTimestamp, false, Kind::kString)));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kDuration, false, {Kind::kAny}),
UnorderedElementsAre(
MatchesUnaryDescriptor(builtin::kDuration, false, Kind::kString)));
} |
#ifndef ABSL_RANDOM_DISTRIBUTIONS_H_
#define ABSL_RANDOM_DISTRIBUTIONS_H_
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/internal/inline_variable.h"
#include "absl/meta/type_traits.h"
#include "absl/random/bernoulli_distribution.h"
#include "absl/random/beta_distribution.h"
#include "absl/random/exponential_distribution.h"
#include "absl/random/gaussian_distribution.h"
#include "absl/random/internal/distribution_caller.h"
#include "absl/random/internal/traits.h"
#include "absl/random/internal/uniform_helper.h"
#include "absl/random/log_uniform_int_distribution.h"
#include "absl/random/poisson_distribution.h"
#include "absl/random/uniform_int_distribution.h"
#include "absl/random/uniform_real_distribution.h"
#include "absl/random/zipf_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Interval tag values passed as the first argument of absl::Uniform to
// select bound semantics: "Closed" includes the bound, "Open" excludes it.
// IntervalClosed and IntervalOpen are shorthand aliases for the symmetric
// closed-closed and open-open tags.
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosedClosed,
                               {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosed, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedOpenTag, IntervalClosedOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpenOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenClosedTag, IntervalOpenClosed, {});
// Uniform<R>(tag, urbg, lo, hi): explicit-return-type overload with an
// interval tag. Participates only when R is explicitly specified (not void).
// If the tag-normalized bounds do not form a valid range, returns `lo`.
template <typename R = void, typename TagType, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(TagType tag,
        URBG&& urbg,
        R lo, R hi) {
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = random_internal::UniformDistributionWrapper<R>;
  // Normalize the bounds only to validate them here; the distribution
  // re-applies the tag itself via the Call below.
  auto a = random_internal::uniform_lower_bound(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, tag, lo, hi);
}
// Uniform<R>(urbg, lo, hi): explicit-return-type overload without a tag;
// uses the default closed-open interval [lo, hi). Returns `lo` if the
// normalized bounds are not a valid range.
template <typename R = void, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(URBG&& urbg,
        R lo, R hi) {
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = random_internal::UniformDistributionWrapper<R>;
  // Tag used only for bound validation below.
  constexpr auto tag = absl::IntervalClosedOpen;
  auto a = random_internal::uniform_lower_bound(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, lo, hi);
}
// Uniform(tag, urbg, lo, hi): return type inferred from the (possibly
// heterogeneous) argument types A and B via uniform_inferred_return_t.
// Participates only when R is left unspecified (void). Returns `lo`
// (converted to the inferred type) if the bounds are not a valid range.
template <typename R = void, typename TagType, typename URBG, typename A,
          typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
                           random_internal::uniform_inferred_return_t<A, B>>
Uniform(TagType tag,
        URBG&& urbg,
        A lo, B hi) {
  using gen_t = absl::decay_t<URBG>;
  using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
  using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
  auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, tag, static_cast<return_t>(lo),
                      static_cast<return_t>(hi));
}
// Uniform(urbg, lo, hi): inferred-return-type overload without a tag;
// default closed-open interval [lo, hi). Returns `lo` (converted) if the
// normalized bounds are not a valid range.
template <typename R = void, typename URBG, typename A, typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
                           random_internal::uniform_inferred_return_t<A, B>>
Uniform(URBG&& urbg,
        A lo, B hi) {
  using gen_t = absl::decay_t<URBG>;
  using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
  using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
  // Tag used only for bound validation below.
  constexpr auto tag = absl::IntervalClosedOpen;
  auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, static_cast<return_t>(lo),
                      static_cast<return_t>(hi));
}
// Uniform<R>(urbg): full-range draw with no bounds; restricted to
// unsigned result types by the enable_if on is_signed.
template <typename R, typename URBG>
typename absl::enable_if_t<!std::numeric_limits<R>::is_signed, R>
Uniform(URBG&& urbg) {
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = random_internal::UniformDistributionWrapper<R>;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg);
}
template <typename URBG>
bool Bernoulli(URBG&& urbg,
double p) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = absl::bernoulli_distribution;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, p);
}
template <typename RealType, typename URBG>
RealType Beta(URBG&& urbg,
RealType alpha, RealType beta) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Beta<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::beta_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, alpha, beta);
}
template <typename RealType, typename URBG>
RealType Exponential(URBG&& urbg,
RealType lambda = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Exponential<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::exponential_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lambda);
}
template <typename RealType, typename URBG>
RealType Gaussian(URBG&& urbg,
RealType mean = 0, RealType stddev = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Gaussian<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::gaussian_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean, stddev);
}
template <typename IntType, typename URBG>
IntType LogUniform(URBG&& urbg,
IntType lo, IntType hi, IntType base = 2) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::LogUniform<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::log_uniform_int_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lo, hi, base);
}
template <typename IntType, typename URBG>
IntType Poisson(URBG&& urbg,
double mean = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Poisson<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::poisson_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean);
}
template <typename IntType, typename URBG>
IntType Zipf(URBG&& urbg,
IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
double v = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Zipf<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::zipf_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, hi, q, v);
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/distributions.h"
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/random.h"
namespace {
// Sample count for statistical checks — presumably used by moment-based
// tests further down this file (usage not visible here; confirm).
constexpr int kSize = 400000;
// Empty fixture; exists only to group the distribution tests.
class RandomDistributionsTest : public testing::Test {};
// Sentinel type produced by the overload-resolution probes below when a
// probed absl::Uniform call expression is ill-formed.
struct Invalid {};
// Probe: resolves to decltype(absl::Uniform(gen, A, B)) when that call is
// well-formed; overload resolution prefers this (int) overload over the
// variadic fallback below.
template <typename A, typename B>
auto InferredUniformReturnT(int)
    -> decltype(absl::Uniform(std::declval<absl::InsecureBitGen&>(),
                              std::declval<A>(), std::declval<B>()));
// Fallback selected when the probed call does not compile.
template <typename, typename>
Invalid InferredUniformReturnT(...);
// Same probe pair for the tag-prefixed absl::Uniform overload.
template <typename TagType, typename A, typename B>
auto InferredTaggedUniformReturnT(int)
    -> decltype(absl::Uniform(std::declval<TagType>(),
                              std::declval<absl::InsecureBitGen&>(),
                              std::declval<A>(), std::declval<B>()));
template <typename, typename, typename>
Invalid InferredTaggedUniformReturnT(...);
// Statically asserts that absl::Uniform — both untagged and tagged —
// deduces `Expect` as its return type for argument types A/B, in both
// argument orders.
template <typename A, typename B, typename Expect>
void CheckArgsInferType() {
  static_assert(
      absl::conjunction<
          std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>,
          std::is_same<Expect,
                       decltype(InferredUniformReturnT<B, A>(0))>>::value,
      "");
  static_assert(
      absl::conjunction<
          std::is_same<Expect, decltype(InferredTaggedUniformReturnT<
                                        absl::IntervalOpenOpenTag, A, B>(0))>,
          std::is_same<Expect,
                       decltype(InferredTaggedUniformReturnT<
                                absl::IntervalOpenOpenTag, B, A>(0))>>::value,
      "");
}
// Probe: the type absl::Uniform<ExplicitRet>(gen, A, B) would return when an
// explicit result type is supplied, if that call is well-formed.
template <typename A, typename B, typename ExplicitRet>
auto ExplicitUniformReturnT(int) -> decltype(absl::Uniform<ExplicitRet>(
    std::declval<absl::InsecureBitGen&>(),
    std::declval<A>(), std::declval<B>()));
// Fallback chosen when the explicit-result call is ill-formed.
template <typename, typename, typename ExplicitRet>
Invalid ExplicitUniformReturnT(...);
// Same probe for the tag-dispatched, explicit-result overload.
template <typename TagType, typename A, typename B, typename ExplicitRet>
auto ExplicitTaggedUniformReturnT(int)
    -> decltype(absl::Uniform<ExplicitRet>(
        std::declval<TagType>(), std::declval<absl::InsecureBitGen&>(),
        std::declval<A>(), std::declval<B>()));
// Fallback chosen when the tagged explicit-result call is ill-formed.
template <typename, typename, typename, typename ExplicitRet>
Invalid ExplicitTaggedUniformReturnT(...);
// Asserts that an explicitly requested result type `Expect` is honored by
// absl::Uniform<Expect>(...) for bound types {A, B} in either order, for
// both the untagged and IntervalOpenOpen-tagged overloads.
template <typename A, typename B, typename Expect>
void CheckArgsReturnExpectedType() {
  static_assert(
      absl::conjunction<
          std::is_same<Expect,
                       decltype(ExplicitUniformReturnT<A, B, Expect>(0))>,
          std::is_same<Expect, decltype(ExplicitUniformReturnT<B, A, Expect>(
                                   0))>>::value,
      "");
  static_assert(
      absl::conjunction<
          std::is_same<Expect,
                       decltype(ExplicitTaggedUniformReturnT<
                                absl::IntervalOpenOpenTag, A, B, Expect>(0))>,
          std::is_same<Expect, decltype(ExplicitTaggedUniformReturnT<
                                        absl::IntervalOpenOpenTag, B, A,
                                        Expect>(0))>>::value,
      "");
}
// Probe: the type of the no-bounds overload absl::Uniform<R>(gen), if that
// call is well-formed; the variadic fallback yields Invalid otherwise.
template <typename R>
auto UniformNoBoundsReturnT(int)
    -> decltype(absl::Uniform<R>(std::declval<absl::InsecureBitGen&>()));
template <typename>
Invalid UniformNoBoundsReturnT(...);
// Compile-time verification of absl::Uniform result-type deduction for every
// supported (and several unsupported) pairing of bound argument types.
TEST_F(RandomDistributionsTest, UniformTypeInference) {
  // Identical bound types deduce that same type.
  CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
  CheckArgsInferType<uint32_t, uint32_t, uint32_t>();
  CheckArgsInferType<uint64_t, uint64_t, uint64_t>();
  CheckArgsInferType<int16_t, int16_t, int16_t>();
  CheckArgsInferType<int32_t, int32_t, int32_t>();
  CheckArgsInferType<int64_t, int64_t, int64_t>();
  CheckArgsInferType<float, float, float>();
  CheckArgsInferType<double, double, double>();
  // An explicit template argument overrides deduction entirely.
  CheckArgsReturnExpectedType<int16_t, int16_t, int32_t>();
  CheckArgsReturnExpectedType<uint16_t, uint16_t, int32_t>();
  CheckArgsReturnExpectedType<int16_t, int16_t, int64_t>();
  CheckArgsReturnExpectedType<int16_t, int32_t, int64_t>();
  CheckArgsReturnExpectedType<int16_t, int32_t, double>();
  CheckArgsReturnExpectedType<float, float, double>();
  CheckArgsReturnExpectedType<int, int, int16_t>();
  // Mixed widths widen to the larger/floating type...
  CheckArgsInferType<uint16_t, uint32_t, uint32_t>();
  CheckArgsInferType<uint16_t, uint64_t, uint64_t>();
  CheckArgsInferType<uint16_t, int32_t, int32_t>();
  CheckArgsInferType<uint16_t, int64_t, int64_t>();
  CheckArgsInferType<uint16_t, float, float>();
  CheckArgsInferType<uint16_t, double, double>();
  CheckArgsInferType<int16_t, int32_t, int32_t>();
  CheckArgsInferType<int16_t, int64_t, int64_t>();
  CheckArgsInferType<int16_t, float, float>();
  CheckArgsInferType<int16_t, double, double>();
  // ...while ambiguous signed/unsigned or lossy pairings must not compile.
  CheckArgsInferType<uint16_t, int16_t, Invalid>();
  CheckArgsInferType<int16_t, uint32_t, Invalid>();
  CheckArgsInferType<int16_t, uint64_t, Invalid>();
  CheckArgsInferType<uint32_t, uint64_t, uint64_t>();
  CheckArgsInferType<uint32_t, int64_t, int64_t>();
  CheckArgsInferType<uint32_t, double, double>();
  CheckArgsInferType<int32_t, int64_t, int64_t>();
  CheckArgsInferType<int32_t, double, double>();
  CheckArgsInferType<uint32_t, int32_t, Invalid>();
  CheckArgsInferType<int32_t, uint64_t, Invalid>();
  CheckArgsInferType<int32_t, float, Invalid>();
  CheckArgsInferType<uint32_t, float, Invalid>();
  CheckArgsInferType<uint64_t, int64_t, Invalid>();
  CheckArgsInferType<int64_t, float, Invalid>();
  CheckArgsInferType<int64_t, double, Invalid>();
  CheckArgsInferType<float, double, double>();
}
// Smoke test: exercises several heterogeneous-argument call forms of
// absl::Uniform.  The EXPECT_NE(1, ...) assertions only confirm the calls
// compile and produce a value; they are not statistical checks.
TEST_F(RandomDistributionsTest, UniformExamples) {
  absl::InsecureBitGen gen;
  EXPECT_NE(1, absl::Uniform(gen, static_cast<uint16_t>(0), 1.0f));
  EXPECT_NE(1, absl::Uniform(gen, 0, 1.0));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen,
                             static_cast<uint16_t>(0), 1.0f));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, 0, 1.0));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, -1, 1.0));
  EXPECT_NE(1, absl::Uniform<double>(absl::IntervalOpenOpen, gen, -1, 1));
  EXPECT_NE(1, absl::Uniform<float>(absl::IntervalOpenOpen, gen, 0, 1));
  EXPECT_NE(1, absl::Uniform<float>(gen, 0, 1));
}
// The no-bounds overload absl::Uniform<R>(gen) exists only for unsigned
// integer result types; signed and floating-point types must not compile
// (their probes fall back to Invalid).
TEST_F(RandomDistributionsTest, UniformNoBounds) {
  absl::InsecureBitGen gen;
  // Runtime smoke calls for each supported unsigned type.
  absl::Uniform<uint8_t>(gen);
  absl::Uniform<uint16_t>(gen);
  absl::Uniform<uint32_t>(gen);
  absl::Uniform<uint64_t>(gen);
  absl::Uniform<absl::uint128>(gen);
  // Compile-time checks of the deduced result type.
  testing::StaticAssertTypeEq<uint8_t,
                              decltype(UniformNoBoundsReturnT<uint8_t>(0))>();
  testing::StaticAssertTypeEq<uint16_t,
                              decltype(UniformNoBoundsReturnT<uint16_t>(0))>();
  testing::StaticAssertTypeEq<uint32_t,
                              decltype(UniformNoBoundsReturnT<uint32_t>(0))>();
  testing::StaticAssertTypeEq<uint64_t,
                              decltype(UniformNoBoundsReturnT<uint64_t>(0))>();
  testing::StaticAssertTypeEq<
      absl::uint128, decltype(UniformNoBoundsReturnT<absl::uint128>(0))>();
  // Unsupported result types: the call must be ill-formed.
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int8_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int16_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int32_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int64_t>(0))>();
  testing::StaticAssertTypeEq<
      Invalid, decltype(UniformNoBoundsReturnT<absl::int128>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<float>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<double>(0))>();
}
// Degenerate and inverted ranges: per the assertions below, empty or
// reversed [lo, hi) ranges return `lo` rather than misbehaving, for
// unsigned, signed, and floating-point bounds alike.
TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
  // x87 extended precision changes the adjacent-double cases below.
  GTEST_SKIP()
      << "Skipping the test because we detected x87 floating-point semantics";
#endif
  absl::InsecureBitGen gen;
  EXPECT_EQ(0, absl::Uniform<uint64_t>(gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<uint64_t>(gen, 1, 0));
  EXPECT_EQ(0, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 1, 0));
  // Ranges touching the unsigned maximum.
  constexpr auto m = (std::numeric_limits<uint64_t>::max)();
  EXPECT_EQ(m, absl::Uniform(gen, m, m));
  EXPECT_EQ(m, absl::Uniform(gen, m, m - 1));
  EXPECT_EQ(m - 1, absl::Uniform(gen, m - 1, m));
  EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m));
  EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m - 1));
  EXPECT_EQ(m - 1, absl::Uniform(absl::IntervalOpenOpen, gen, m - 1, m));
  EXPECT_EQ(0, absl::Uniform<int64_t>(gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<int64_t>(gen, 1, 0));
  EXPECT_EQ(0, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 1, 0));
  // Ranges touching the signed extremes.
  constexpr auto l = (std::numeric_limits<int64_t>::min)();
  constexpr auto r = (std::numeric_limits<int64_t>::max)();
  EXPECT_EQ(l, absl::Uniform(gen, l, l));
  EXPECT_EQ(r, absl::Uniform(gen, r, r));
  EXPECT_EQ(r, absl::Uniform(gen, r, r - 1));
  EXPECT_EQ(r - 1, absl::Uniform(gen, r - 1, r));
  EXPECT_EQ(l, absl::Uniform(absl::IntervalOpenOpen, gen, l, l));
  EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r));
  EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r - 1));
  EXPECT_EQ(r - 1, absl::Uniform(absl::IntervalOpenOpen, gen, r - 1, r));
  // Floating-point ranges one ULP wide: e/f are the doubles adjacent to 1.0,
  // g is the smallest positive denormal.
  const double e = std::nextafter(1.0, 2.0);
  const double f = std::nextafter(1.0, 0.0);
  const double g = std::numeric_limits<double>::denorm_min();
  EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, e));
  EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, f));
  EXPECT_EQ(0.0, absl::Uniform(gen, 0.0, g));
  // Open-open intervals on a one-ULP range yield the upper endpoint.
  EXPECT_EQ(e, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, e));
  EXPECT_EQ(f, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, f));
  EXPECT_EQ(g, absl::Uniform(absl::IntervalOpenOpen, gen, 0.0, g));
}
// Draws kSize samples of Uniform(gen, 0, 1.0) and checks the first four
// sample moments against the analytic moments of U(0,1):
// mean 1/2, variance 1/12, skewness 0, kurtosis 9/5.
TEST_F(RandomDistributionsTest, UniformReal) {
  absl::InsecureBitGen gen;
  std::vector<double> samples;
  samples.reserve(kSize);
  for (int draw = 0; draw < kSize; ++draw) {
    samples.push_back(absl::Uniform(gen, 0, 1.0));
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(samples);
  EXPECT_NEAR(0.5, moments.mean, 0.02);
  EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
  EXPECT_NEAR(0.0, moments.skewness, 0.02);
  EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
// Draws closed-interval integer samples on [0, 1e12], rescales them to
// [0, 1], and checks the moments against those of U(0,1).
TEST_F(RandomDistributionsTest, UniformInt) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    const int64_t kMax = 1000000000000ll;
    int64_t j = absl::Uniform(absl::IntervalClosedClosed, gen, 0, kMax);
    // Normalize to [0, 1] so the U(0,1) reference moments apply.
    values[i] = static_cast<double>(j) / static_cast<double>(kMax);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.5, moments.mean, 0.02);
  EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
  EXPECT_NEAR(0.0, moments.skewness, 0.02);
  EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
// Exponential with default rate 1: mean 1, variance 1, skewness 2; the
// kurtosis (analytically 9 for rate 1) is only bounded below because the
// heavy tail makes the sample estimate noisy.
TEST_F(RandomDistributionsTest, Exponential) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Exponential<double>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(1.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.025);
  EXPECT_NEAR(2.0, moments.skewness, 0.1);
  EXPECT_LT(5.0, moments.kurtosis);
}
// Poisson with the default mean 1: mean, variance, and skewness are all 1.
TEST_F(RandomDistributionsTest, PoissonDefault) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Poisson<int64_t>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(1.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.02);
  EXPECT_NEAR(1.0, moments.skewness, 0.025);
  EXPECT_LT(2.0, moments.kurtosis);
}
// Poisson with a very large mean (1e8): mean and variance both equal the
// mean parameter.
TEST_F(RandomDistributionsTest, PoissonLarge) {
  constexpr double kMean = 100000000.0;
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Poisson<int64_t>(gen, kMean);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(kMean, moments.mean, kMean * 0.015);
  EXPECT_NEAR(kMean, moments.variance, kMean * 0.015);
  // NOTE(review): Poisson skewness is 1/sqrt(mean), not sqrt(mean); with a
  // tolerance of kMean * 0.02 (= 2e6) this check is nearly vacuous either
  // way — confirm intent before tightening.
  EXPECT_NEAR(std::sqrt(kMean), moments.skewness, kMean * 0.02);
  EXPECT_LT(2.0, moments.kurtosis);
}
// Draws kSize Bernoulli(kP) samples and checks that the empirical mean is
// close to the success probability kP.
TEST_F(RandomDistributionsTest, Bernoulli) {
  constexpr double kP = 0.5151515151;
  absl::InsecureBitGen gen;
  std::vector<double> samples;
  samples.reserve(kSize);
  for (int draw = 0; draw < kSize; ++draw) {
    samples.push_back(absl::Bernoulli(gen, kP));
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(samples);
  EXPECT_NEAR(kP, moments.mean, 0.01);
}
// Beta(2, 3): mean is alpha / (alpha + beta) = 2/5 = 0.4.
TEST_F(RandomDistributionsTest, Beta) {
  constexpr double kAlpha = 2.0;
  constexpr double kBeta = 3.0;
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Beta(gen, kAlpha, kBeta);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.4, moments.mean, 0.01);
}
// Zipf over [0, 100] with default parameters.  The distribution is very
// heavy-tailed, so the sample mean is extremely noisy; the deliberately
// loose tolerance (2000) only guards against gross misbehavior.
TEST_F(RandomDistributionsTest, Zipf) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Zipf<int64_t>(gen, 100);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(6.5944, moments.mean, 2000) << moments;
}
// Standard normal: mean 0, variance 1, skewness 0, kurtosis 3.
TEST_F(RandomDistributionsTest, Gaussian) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Gaussian<double>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.04);
  EXPECT_NEAR(0, moments.skewness, 0.2);
  EXPECT_NEAR(3.0, moments.kurtosis, 0.5);
}
// LogUniform over [0, 2^10 - 1].  The reference mean below averages the
// endpoints of each of the 11 power-of-two buckets (each bucket is equally
// likely, and values within a bucket are uniform, so the per-bucket mean is
// the midpoint of its endpoints).
TEST_F(RandomDistributionsTest, LogUniform) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::LogUniform<int64_t>(gen, 0, (1 << 10) - 1);
  }
  const double mean = (0 + 1 + 1 + 2 + 3 + 4 + 7 + 8 + 15 + 16 + 31 + 32 + 63 +
                       64 + 127 + 128 + 255 + 256 + 511 + 512 + 1023) /
                      (2.0 * 11.0);
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(mean, moments.mean, 2) << moments;
}
} | template <typename IntType, typename URBG>
// Returns a single Poisson-distributed value with the given mean (default
// 1.0), drawn from `urbg`.  IntType must be an integral type; the draw is
// delegated to absl::poisson_distribution via DistributionCaller.
IntType Poisson(URBG&& urbg,
                double mean = 1.0) {
  static_assert(random_internal::IsIntegral<IntType>::value,
                "Template-argument 'IntType' must be an integral type, in "
                "absl::Poisson<IntType, URBG>(...)");
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = typename absl::poisson_distribution<IntType>;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, mean);
}
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Poisson<int64_t>(gen);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(1.0, moments.mean, 0.02);
EXPECT_NEAR(1.0, moments.variance, 0.02);
EXPECT_NEAR(1.0, moments.skewness, 0.025);
EXPECT_LT(2.0, moments.kurtosis);
}
TEST_F(RandomDistributionsTest, PoissonLarge) {
constexpr double kMean = 100000000.0;
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Poisson<int64_t>(gen, kMean);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(kMean, moments.mean, kMean * 0.015);
EXPECT_NEAR(kMean, moments.variance, kMean * 0.015);
EXPECT_NEAR(std::sqrt(kMean), moments.skewness, kMean * 0.02);
EXPECT_LT(2.0, moments.kurtosis);
} |
#include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
namespace tensorflow {
namespace {
// Detaches `h` from its CancellationManager: deregisters the pending
// cancellation callback (if any) and resets the hook's cancellation fields
// so the callback cannot fire after the hook has been handed off.
void DeregisterCancellation(BufRendezvous::Hook* h) {
  CancellationManager* cm = h->cancellation_manager;
  if (cm == nullptr) return;
  cm->DeregisterCallback(h->cancellation_token);
  h->cancellation_manager = nullptr;
  h->cancellation_token = CancellationManager::kInvalidToken;
}
}
// Destructor: any hooks still pending are failed with an Internal error.
// A non-empty table here means Provide/Consume calls were never matched.
BufRendezvous::~BufRendezvous() {
  mutex_lock l(mu_);
  if (!hook_table_.empty()) {
    PurgeTable(errors::Internal("Delete called on non-empty BufRendezvous"),
               &hook_table_);
  }
}
// Fails every pending hook with `s` and records a derived status so that all
// subsequent ProvideBuf/ConsumeBuf calls fail immediately.
void BufRendezvous::StartAbort(const Status& s) {
  CHECK(!s.ok());
  // Swap the table out under the lock, then invoke the callbacks outside it
  // so a callback that re-enters this object cannot deadlock.
  HookTable dummy_table;
  {
    mutex_lock l(mu_);
    status_.Update(StatusGroup::MakeDerived(s));
    hook_table_.swap(dummy_table);
  }
  PurgeTable(s, &dummy_table);
}
// Fails every hook in `table` with status `s`, invoking whichever of the
// consumer/producer callbacks are set, then deletes the hooks and clears the
// table.  Pending cancellation callbacks are deregistered first so they
// cannot fire on a deleted hook.
void BufRendezvous::PurgeTable(const Status& s, HookTable* table) {
  for (auto& it : *table) {
    Hook* h = it.second;
    if (h->cancellation_manager != nullptr) {
      // TryDeregister (not Deregister): cancellation may already be running.
      h->cancellation_manager->TryDeregisterCallback(h->cancellation_token);
    }
    if (h->cons_cb != nullptr) {
      h->cons_cb(s, nullptr);
    }
    if (h->prod_cb != nullptr) {
      h->prod_cb(s);
    }
    delete h;
  }
  table->clear();
}
// Human-readable summary of a hook for debug logging: producer device name
// plus the raw addresses of the context, value, and callbacks (0 when the
// corresponding callback is unset).
string BufRendezvous::Hook::DebugString() const {
  return absl::StrCat(
      "[dev:", (prod_dev ? prod_dev->name() : "none"),
      ", ctx:", reinterpret_cast<uint64>(prod_ctx),
      ", val:", reinterpret_cast<uint64>(prod_value),
      ", pcb:", prod_cb ? reinterpret_cast<uint64>(&prod_cb) : 0,
      ", ccb:", cons_cb ? reinterpret_cast<uint64>(&cons_cb) : 0, "]");
}
// Producer side of the rendezvous: records tensor `v` (plus its device,
// context, and allocator attributes) under `key`.  If a consumer is already
// waiting, its callback is invoked immediately (outside the lock); otherwise
// the hook stays in the table until the consumer arrives or cancellation
// fires.  `done` is called exactly once — with OK when the consumer later
// calls DoneWithHook, or with an error on abort/cancel/duplicate-provide.
void BufRendezvous::ProvideBuf(const string& key, Device* dev,
                               DeviceContext* dev_ctx, const Tensor* v,
                               const AllocatorAttributes& attr,
                               const ProducerCallback& done,
                               CancellationManager* cancellation_manager) {
  DVLOG(4) << "ProvideBuf: key = " << key;
#ifndef NDEBUG
  if (VLOG_IS_ON(4)) {
    LogContents();
  }
#endif
  Hook* h = nullptr;
  Status providebuf_status;
  // do/while(false) gives `break` as a structured early-exit that still runs
  // the callback-invocation code below, outside the lock.
  do {
    mutex_lock l(mu_);
    if (!status_.ok()) {
      // Rendezvous already aborted: fail fast.
      providebuf_status = status_;
      break;
    } else {
      CancellationToken cancellation_token = CancellationManager::kInvalidToken;
      auto it = hook_table_.find(key);
      if (it == hook_table_.end()) {
        // Producer arrived first: create the hook.
        if (cancellation_manager != nullptr) {
          cancellation_token = cancellation_manager->get_cancellation_token();
        }
        h = new Hook(cancellation_manager, cancellation_token);
        it = hook_table_.insert(std::make_pair(key, h)).first;
      } else {
        // Consumer is already waiting; a second producer is an error.
        if (it->second->prod_cb != nullptr) {
          providebuf_status = errors::Internal(
              "BufRendezvous::ProvideBuf already called for key ", key);
          break;
        }
        h = it->second;
      }
      // Fill in the producer's side of the hook.
      h->prod_dev = dev;
      h->prod_ctx = dev_ctx;
      h->prod_value = v;
      h->prod_attr = attr;
      h->prod_cb = done;
      if (h->cons_cb != nullptr) {
        // Consumer waiting: remove from the table and (below, outside the
        // lock) hand the hook to the consumer.  `h` stays non-null.
        hook_table_.erase(it);
      } else {
        // No consumer yet: register for cancellation and leave the hook in
        // the table.  Registration fails iff cancellation already started.
        if (cancellation_manager != nullptr &&
            !cancellation_manager->RegisterCallback(
                cancellation_token, [this, key]() { CancelHook(key); })) {
          providebuf_status = errors::Cancelled(
              "Operation was cancelled for BufRendezvous key ", key);
          hook_table_.erase(it);
          delete h;
        }
        // Null out `h` so the consumer callback is not invoked below.
        h = nullptr;
      }
    }
  } while (false);
  if (h) {
    DVLOG(4) << "ProvideBuf: key = " << key << ": calling cons_cb"
             << h->DebugString();
    DeregisterCancellation(h);
    h->cons_cb(absl::OkStatus(), h);
  }
  if (!providebuf_status.ok()) {
    done(providebuf_status);
  }
}
// Consumer side of the rendezvous: requests the buffer registered under
// `key`.  First validates that `device_name` is a known local device whose
// incarnation matches `device_incarnation` (a mismatch means the worker that
// owns the device restarted).  If the producer already provided, `done` is
// invoked immediately (outside the lock) with the hook; otherwise a hook
// carrying `done` is left in the table until ProvideBuf or cancellation.
// `done` is called exactly once.
void BufRendezvous::ConsumeBuf(const string& key, const string& device_name,
                               const uint64 device_incarnation,
                               const ConsumerCallback& done,
                               CancellationManager* cancellation_manager) {
  DVLOG(4) << "ConsumeBuf: key = " << key << " device_name = " << device_name;
#ifndef NDEBUG
  if (VLOG_IS_ON(4)) {
    LogContents();
  }
#endif
  // Device validation happens before touching the hook table.
  Device* device;
  Status consumebuf_status = dev_mgr_->LookupDevice(device_name, &device);
  if (consumebuf_status.ok() &&
      device->attributes().incarnation() != device_incarnation) {
    consumebuf_status = errors::FailedPrecondition(
        "RecvBuf expects a different device incarnation: ", device_incarnation,
        " vs. ", device->attributes().incarnation(),
        ". Your worker job that contains the device (\"", device_name,
        "\") was probably restarted. Check your "
        "worker job for the reason why it was restarted.");
  }
  if (!consumebuf_status.ok()) {
    done(consumebuf_status, nullptr);
    return;
  }
  Hook* existing_hook = nullptr;
  // do/while(false) gives `break` as a structured early-exit; callbacks are
  // invoked below, outside the lock.
  do {
    mutex_lock l(mu_);
    if (!status_.ok()) {
      // Rendezvous already aborted: fail fast.
      consumebuf_status = status_;
      break;
    }
    auto it = hook_table_.find(key);
    if (it != hook_table_.end()) {
      // Producer arrived first: claim the hook.  A second consumer for the
      // same key is a protocol violation.
      if (it->second->cons_cb) {
        consumebuf_status =
            errors::Internal("Second consumer arrived for key ", key);
        break;
      }
      existing_hook = it->second;
      hook_table_.erase(it);
      existing_hook->cons_cb = done;
    } else {
      // Consumer arrived first: register for cancellation and leave a hook
      // carrying `done` in the table for the producer to find.
      CancellationToken cancellation_token = CancellationManager::kInvalidToken;
      bool already_cancelled = false;
      if (cancellation_manager != nullptr) {
        cancellation_token = cancellation_manager->get_cancellation_token();
        already_cancelled = !cancellation_manager->RegisterCallback(
            cancellation_token, [this, key]() { CancelHook(key); });
      }
      if (already_cancelled) {
        consumebuf_status = errors::Cancelled(
            "Operation was cancelled for BufRendezvous key ", key);
      } else {
        Hook* h = new Hook(cancellation_manager, cancellation_token);
        h->cons_cb = done;
        // The returned iterator is not needed: the hook will be looked up by
        // key when the producer arrives or cancellation fires.  (Previously
        // the result was stored into `it` and never read.)
        hook_table_.insert(std::make_pair(key, h));
        return;
      }
    }
  } while (false);
  if (existing_hook) {
    DVLOG(4) << "ConsumeBuf: key = " << key << ": calling cons_cb"
             << existing_hook->DebugString();
    DeregisterCancellation(existing_hook);
    existing_hook->cons_cb(absl::OkStatus(), existing_hook);
    return;
  }
  if (!consumebuf_status.ok()) {
    done(consumebuf_status, nullptr);
    return;
  }
}
void BufRendezvous::CancelHook(const string& key) {
Hook* h = nullptr;
{
mutex_lock l(mu_);
auto it = hook_table_.find(key);
if (it == hook_table_.end()) return;
h = it->second;
hook_table_.erase(it);
}
if (h != nullptr) {
auto s = errors::Cancelled("Operation was cancelled for BufRendezvous key ",
key);
if (h->prod_cb != nullptr) {
h->prod_cb(s);
}
if (h->cons_cb != nullptr) {
h->cons_cb(s, nullptr);
}
delete h;
}
}
// Called by the consumer when it is finished with the buffer: notifies the
// producer of success and releases the hook.  Assumes h->prod_cb is set,
// i.e. the hook came from a completed ProvideBuf/ConsumeBuf pairing.
void BufRendezvous::DoneWithHook(Hook* h) {
  h->prod_cb(absl::OkStatus());
  delete h;
}
// Debug aid: logs this rendezvous's address, step id, and one line per
// pending hook.  Takes mu_, so must not be called with the lock held.
void BufRendezvous::LogContents() {
  mutex_lock l(mu_);
  LOG(INFO) << strings::StrCat("BufRendezvous ",
                               strings::Hex(reinterpret_cast<uint64>(this)),
                               " step_id=", step_id_, " current contents:");
  for (const auto& it : hook_table_) {
    LOG(INFO) << it.first << ":" << it.second->DebugString();
  }
}
} | #include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Fixture: a BufRendezvous over a single fake CPU device, plus two tensors
// and a sentinel DeviceContext pointer for Provide/Consume round-trips.
class BufRendezvousTest : public ::testing::Test {
 protected:
  // Builds a no-op Device with the given name/type/incarnation; Sync always
  // succeeds and no allocator is provided.
  static std::unique_ptr<Device> NewDevice(const string& name,
                                           const string& type,
                                           const uint64 incarnation) {
    class FakeDevice : public Device {
     public:
      explicit FakeDevice(const DeviceAttributes& attrs)
          : Device(nullptr, attrs) {}
      Status Sync() override { return absl::OkStatus(); }
      Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
    };
    DeviceAttributes attrs;
    attrs.set_name(name);
    attrs.set_device_type(type);
    attrs.set_incarnation(incarnation);
    return std::make_unique<FakeDevice>(attrs);
  }
  // Creates a one-device DeviceMgr and a fresh BufRendezvous (step id 123)
  // over it.
  void InitializeDevice(const string& device, const string& type,
                        const uint64 incarnation) {
    std::vector<std::unique_ptr<Device>> devices;
    devices.push_back(NewDevice(device, type, incarnation));
    dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
    br_ = std::make_unique<BufRendezvous>(123, dev_mgr_.get());
  }
  BufRendezvousTest()
      : a_(Tensor(DT_FLOAT, TensorShape({24}))),
        b_(Tensor(DT_FLOAT, TensorShape({24}))),
        // Arbitrary non-null pointer; never dereferenced, only compared.
        fake_device_context_(reinterpret_cast<DeviceContext*>(1024LLU)) {
    InitializeDevice(*kDefaultDeviceName, "CPU", kDefaultIncarnation);
    TF_CHECK_OK(dev_mgr_->LookupDevice(*kDefaultDeviceName, &default_device_));
  }
  Tensor a_;
  Tensor b_;
  AllocatorAttributes aa_;
  Device* default_device_;
  DeviceContext* fake_device_context_;
  std::unique_ptr<DeviceMgr> dev_mgr_;
  std::unique_ptr<BufRendezvous> br_;
  CancellationManager cm_;
  static const string* const kDefaultKey;
  static const string* const kDefaultDeviceName;
  static const uint64 kDefaultIncarnation;
};
const string* const BufRendezvousTest::kDefaultKey = new string("key0");
const string* const BufRendezvousTest::kDefaultDeviceName =
new string("/device:CPU:0");
const uint64 BufRendezvousTest::kDefaultIncarnation = 12345;
TEST_F(BufRendezvousTest, CorrectUseProducerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_FALSE(cons_callback_called);
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, ErrorDuplicatePut) {
bool prod_callback_called = false;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_callback_called](const Status& s) { prod_callback_called = true; },
&cm_);
Status bad_status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&bad_status, ¬e](const Status& s) {
bad_status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_FALSE(bad_status.ok());
EXPECT_EQ(absl::StrCat("BufRendezvous::ProvideBuf already called for key ",
*kDefaultKey),
bad_status.message());
EXPECT_FALSE(prod_callback_called);
br_.reset();
}
TEST_F(BufRendezvousTest, ErrorDeleteNonEmpty) {
Status cons_status;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_EQ(h, nullptr);
},
&cm_);
EXPECT_TRUE(cons_status.ok());
br_.reset();
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ("Delete called on non-empty BufRendezvous", cons_status.message());
}
TEST_F(BufRendezvousTest, AbortNonEmpty) {
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
br_->StartAbort(errors::Internal("Falling sky detected"));
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_EQ(prod_status.message(), "Falling sky detected");
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ(cons_status.message(), "Falling sky detected");
}
// Aborting a rendezvous with no pending hooks must be a harmless no-op.
TEST_F(BufRendezvousTest, AbortEmpty) {
  br_->StartAbort(errors::Internal("Falling sky detected"));
}
TEST_F(BufRendezvousTest, UseAfterAbort) {
br_->StartAbort(errors::Internal("Falling sky detected"));
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_NE(prod_status.message().find("Falling sky detected"), string::npos);
EXPECT_FALSE(cons_status.ok());
EXPECT_NE(cons_status.message().find("Falling sky detected"), string::npos);
}
// A consumer presenting a stale device incarnation must get
// FAILED_PRECONDITION even though a matching producer is waiting.
TEST_F(BufRendezvousTest, DeviceIncarnationMismatch) {
  Status cons_status;
  Notification note;
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [](const Status&) {}, nullptr);
  const uint64 incorrect_incarnation = 23456;  // != kDefaultIncarnation
  br_->ConsumeBuf(
      *kDefaultKey, *kDefaultDeviceName, incorrect_incarnation,
      [&note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
        cons_status = s;
        note.Notify();
      },
      nullptr);
  note.WaitForNotification();
  EXPECT_TRUE(errors::IsFailedPrecondition(cons_status));
}
TEST_F(BufRendezvousTest, ProvideThenCancel) {
Status status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenProvide) {
Status status;
Notification note;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ConsumeThenCancel) {
Status status;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenConsume) {
Status status;
Notification note;
cm_.StartCancel();
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
note.WaitForNotification();
cm_.StartCancel();
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CancelThenProvideConsume) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
EXPECT_TRUE(errors::IsCancelled(prod_status));
prod_callback_called = true;
},
&cm_);
EXPECT_TRUE(prod_callback_called);
EXPECT_TRUE(errors::IsCancelled(prod_status));
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_TRUE(errors::IsCancelled(cons_status));
cons_callback_called = true;
},
&cm_);
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(errors::IsCancelled(cons_status));
}
}
} | void BufRendezvous::ProvideBuf(const string& key, Device* dev,
DeviceContext* dev_ctx, const Tensor* v,
const AllocatorAttributes& attr,
const ProducerCallback& done,
CancellationManager* cancellation_manager) {
DVLOG(4) << "ProvideBuf: key = " << key;
#ifndef NDEBUG
if (VLOG_IS_ON(4)) {
LogContents();
}
#endif
Hook* h = nullptr;
Status providebuf_status;
do {
mutex_lock l(mu_);
if (!status_.ok()) {
providebuf_status = status_;
break;
} else {
CancellationToken cancellation_token = CancellationManager::kInvalidToken;
auto it = hook_table_.find(key);
if (it == hook_table_.end()) {
if (cancellation_manager != nullptr) {
cancellation_token = cancellation_manager->get_cancellation_token();
}
h = new Hook(cancellation_manager, cancellation_token);
it = hook_table_.insert(std::make_pair(key, h)).first;
} else {
if (it->second->prod_cb != nullptr) {
providebuf_status = errors::Internal(
"BufRendezvous::ProvideBuf already called for key ", key);
break;
}
h = it->second;
}
h->prod_dev = dev;
h->prod_ctx = dev_ctx;
h->prod_value = v;
h->prod_attr = attr;
h->prod_cb = done;
if (h->cons_cb != nullptr) {
hook_table_.erase(it);
} else {
if (cancellation_manager != nullptr &&
!cancellation_manager->RegisterCallback(
cancellation_token, [this, key]() { CancelHook(key); })) {
providebuf_status = errors::Cancelled(
"Operation was cancelled for BufRendezvous key ", key);
hook_table_.erase(it);
delete h;
}
h = nullptr;
}
}
} while (false);
if (h) {
DVLOG(4) << "ProvideBuf: key = " << key << ": calling cons_cb"
<< h->DebugString();
DeregisterCancellation(h);
h->cons_cb(absl::OkStatus(), h);
}
if (!providebuf_status.ok()) {
done(providebuf_status);
}
} | TEST_F(BufRendezvousTest, CorrectUseProducerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
// Consumer arrives before producer: the consume callback stays pending until
// ProvideBuf supplies the matching buffer, then both callbacks succeed.
// (Fix: the lambda capture list had been mangled by HTML-entity mojibake,
// `¬e` in place of `&note`.)
TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) {
  Status prod_status;
  Status cons_status;
  bool prod_callback_called = false;
  bool cons_callback_called = false;
  Notification note;
  br_->ConsumeBuf(
      *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
      [this, &cons_status, &cons_callback_called](const Status& s,
                                                  BufRendezvous::Hook* h) {
        cons_status = s;
        cons_callback_called = true;
        ASSERT_TRUE(h != nullptr);
        // The hook must carry exactly what the producer provided.
        EXPECT_EQ(h->prod_dev, default_device_);
        EXPECT_EQ(h->prod_ctx, fake_device_context_);
        EXPECT_EQ(h->prod_value, &a_);
        br_->DoneWithHook(h);
      },
      &cm_);
  // No producer yet, so the consumer callback must not have fired.
  EXPECT_FALSE(cons_callback_called);
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&note, &prod_status, &prod_callback_called](const Status& s) {
        prod_status = s;
        prod_callback_called = true;
        note.Notify();
      },
      &cm_);
  EXPECT_TRUE(cons_callback_called);
  note.WaitForNotification();
  EXPECT_TRUE(prod_callback_called);
  TF_EXPECT_OK(cons_status);
  TF_EXPECT_OK(prod_status);
}
// A second ProvideBuf for the same key is an error: the duplicate producer's
// callback fires with the "already called" message while the first producer
// stays pending (no consumer ever arrives).
// (Fix: the lambda capture list had been mangled by HTML-entity mojibake,
// `¬e` in place of `&note`.)
TEST_F(BufRendezvousTest, ErrorDuplicatePut) {
  bool prod_callback_called = false;
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&prod_callback_called](const Status& s) { prod_callback_called = true; },
      &cm_);
  Status bad_status;
  Notification note;
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&bad_status, &note](const Status& s) {
        bad_status = s;
        note.Notify();
      },
      &cm_);
  note.WaitForNotification();
  EXPECT_FALSE(bad_status.ok());
  EXPECT_EQ(absl::StrCat("BufRendezvous::ProvideBuf already called for key ",
                         *kDefaultKey),
            bad_status.message());
  // The first producer never matched a consumer, so its callback did not run.
  EXPECT_FALSE(prod_callback_called);
  // Destroy the rendezvous to release the still-pending first hook.
  br_.reset();
}
// Cancellation after ProvideBuf aborts the pending producer with a Cancelled
// status whose message names the rendezvous key.
// (Fix: the lambda capture list had been mangled by HTML-entity mojibake,
// `¬e` in place of `&note`.)
TEST_F(BufRendezvousTest, ProvideThenCancel) {
  Status status;
  Notification note;
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&status, &note](const Status& s) {
        status = s;
        note.Notify();
      },
      &cm_);
  cm_.StartCancel();
  note.WaitForNotification();
  EXPECT_TRUE(errors::IsCancelled(status));
  EXPECT_NE(
      status.message().find(absl::StrCat(
          "Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
      string::npos);
}
// StartCancel before ProvideBuf: the producer callback still fires (with a
// Cancelled status naming the key) rather than being dropped.
// (Fix: the lambda capture list had been mangled by HTML-entity mojibake,
// `¬e` in place of `&note`.)
TEST_F(BufRendezvousTest, CancelThenProvide) {
  Status status;
  Notification note;
  cm_.StartCancel();
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&status, &note](const Status& s) {
        status = s;
        note.Notify();
      },
      &cm_);
  note.WaitForNotification();
  EXPECT_TRUE(errors::IsCancelled(status));
  EXPECT_NE(
      status.message().find(absl::StrCat(
          "Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
      string::npos);
}
// A completed provide/consume pair is unaffected by a later StartCancel:
// both callbacks have already run and both statuses remain OK.
// (Fix: the lambda capture list had been mangled by HTML-entity mojibake,
// `¬e` in place of `&note`.)
TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) {
  Status prod_status;
  Status cons_status;
  bool prod_callback_called = false;
  bool cons_callback_called = false;
  Notification note;
  br_->ProvideBuf(
      *kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
      [&note, &prod_status, &prod_callback_called](const Status& s) {
        prod_status = s;
        prod_callback_called = true;
        note.Notify();
      },
      &cm_);
  EXPECT_FALSE(prod_callback_called);
  br_->ConsumeBuf(
      *kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
      [this, &cons_status, &cons_callback_called](const Status& s,
                                                  BufRendezvous::Hook* h) {
        cons_status = s;
        cons_callback_called = true;
        ASSERT_TRUE(h != nullptr);
        EXPECT_EQ(h->prod_dev, default_device_);
        EXPECT_EQ(h->prod_ctx, fake_device_context_);
        EXPECT_EQ(h->prod_value, &a_);
        br_->DoneWithHook(h);
      },
      &cm_);
  note.WaitForNotification();
  // Cancelling after completion must not disturb the delivered statuses.
  cm_.StartCancel();
  EXPECT_TRUE(cons_callback_called);
  EXPECT_TRUE(prod_callback_called);
  TF_EXPECT_OK(cons_status);
  TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CancelThenProvideConsume) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
EXPECT_TRUE(errors::IsCancelled(prod_status));
prod_callback_called = true;
},
&cm_);
EXPECT_TRUE(prod_callback_called);
EXPECT_TRUE(errors::IsCancelled(prod_status));
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_TRUE(errors::IsCancelled(cons_status));
cons_callback_called = true;
},
&cm_);
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(errors::IsCancelled(cons_status));
} |
#include <cstddef>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/sized_input_view.h"
#include "common/type.h"
namespace cel {
namespace {
// Renders an opaque type as `name` when it has no parameters, otherwise as
// `name<p1, p2, ...>` with parameters formatted via operator<<.
std::string OpaqueDebugString(absl::string_view name,
                              absl::Span<const Type> parameters) {
  std::string result(name);
  if (!parameters.empty()) {
    absl::StrAppend(&result, "<",
                    absl::StrJoin(parameters, ", ", absl::StreamFormatter()),
                    ">");
  }
  return result;
}
// Materializes a sized view of TypeViews into an owning FixedArray of Types.
absl::FixedArray<Type, 1> SizedInputViewToFixedArray(
    const SizedInputView<TypeView>& parameters) {
  absl::FixedArray<Type, 1> result(parameters.size());
  size_t i = 0;
  for (const auto& parameter : parameters) {
    result[i] = Type(parameter);
    ++i;
  }
  // The view promised `size()` elements; verify we consumed exactly that many.
  ABSL_DCHECK_EQ(i, parameters.size());
  return result;
}
}
// Constructs an opaque type with the given name and type parameters; the
// backing data is allocated via `memory_manager`.
// Fix: dropped the `std::move` on `parameters` — moving a `const&` is a
// no-op cast (the argument still binds to the const-reference parameter) and
// only misleads readers (clang-tidy: performance-move-const-arg).
OpaqueType::OpaqueType(MemoryManagerRef memory_manager, absl::string_view name,
                       const SizedInputView<TypeView>& parameters)
    : data_(memory_manager.MakeShared<common_internal::OpaqueTypeData>(
          std::string(name),
          SizedInputViewToFixedArray(parameters))) {}
// Formats this opaque type as `name` or `name<parameters...>`.
std::string OpaqueType::DebugString() const {
  return OpaqueDebugString(name(), parameters());
}
// View counterpart: formats identically to the owning OpaqueType.
std::string OpaqueTypeView::DebugString() const {
  return OpaqueDebugString(name(), parameters());
}
} | #include <sstream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/native_type.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using testing::TestParamInfo;
using testing::TestWithParam;
// Fixture parameterized over pooling vs. reference-counting memory management.
class OpaqueTypeTest : public common_internal::ThreadCompatibleMemoryTest<> {};
// kind() reports OpaqueType::kKind, directly and when wrapped in Type.
TEST_P(OpaqueTypeTest, Kind) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}).kind(),
            OpaqueType::kKind);
  EXPECT_EQ(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})).kind(),
      OpaqueType::kKind);
}
// name() returns the type name without parameters.
TEST_P(OpaqueTypeTest, Name) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}).name(),
            "test.Opaque");
  EXPECT_EQ(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})).name(),
      "test.Opaque");
}
// Streaming uses DebugString: parameters in angle brackets, omitted if none.
TEST_P(OpaqueTypeTest, DebugString) {
  {
    std::ostringstream out;
    out << OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << OpaqueType(memory_manager(), "test.Opaque", {});
    EXPECT_EQ(out.str(), "test.Opaque");
  }
}
// Equal opaque types must hash equally.
TEST_P(OpaqueTypeTest, Hash) {
  EXPECT_EQ(
      absl::HashOf(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
      absl::HashOf(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
}
// Equality holds across every OpaqueType/Type wrapping combination.
TEST_P(OpaqueTypeTest, Equal) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}),
            OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
  EXPECT_EQ(Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}),
            Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
  EXPECT_EQ(Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
}
// RTTI-style identification resolves to OpaqueType through Type as well.
TEST_P(OpaqueTypeTest, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(
                OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            NativeTypeId::For<OpaqueType>());
  EXPECT_EQ(NativeTypeId::Of(Type(
                OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
            NativeTypeId::For<OpaqueType>());
}
TEST_P(OpaqueTypeTest, InstanceOf) {
  EXPECT_TRUE(InstanceOf<OpaqueType>(
      OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
  EXPECT_TRUE(InstanceOf<OpaqueType>(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))));
}
TEST_P(OpaqueTypeTest, Cast) {
  EXPECT_THAT(Cast<OpaqueType>(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
              An<OpaqueType>());
  EXPECT_THAT(Cast<OpaqueType>(Type(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
              An<OpaqueType>());
}
TEST_P(OpaqueTypeTest, As) {
  EXPECT_THAT(As<OpaqueType>(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
              Ne(absl::nullopt));
  EXPECT_THAT(As<OpaqueType>(Type(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
              Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
    OpaqueTypeTest, OpaqueTypeTest,
    ::testing::Values(MemoryManagement::kPooling,
                      MemoryManagement::kReferenceCounting),
    OpaqueTypeTest::ToString);
// Mirror of OpaqueTypeTest for the non-owning OpaqueTypeView; each test views
// an OpaqueType owned by `type`.
class OpaqueTypeViewTest
    : public common_internal::ThreadCompatibleMemoryTest<> {};
TEST_P(OpaqueTypeViewTest, Kind) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type).kind(), OpaqueTypeView::kKind);
  EXPECT_EQ(TypeView(OpaqueTypeView(type)).kind(), OpaqueTypeView::kKind);
}
TEST_P(OpaqueTypeViewTest, Name) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type).name(), "test.Opaque");
  EXPECT_EQ(TypeView(OpaqueTypeView(type)).name(), "test.Opaque");
}
// Views stream exactly like the owning type.
TEST_P(OpaqueTypeViewTest, DebugString) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  {
    std::ostringstream out;
    out << OpaqueTypeView(type);
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << TypeView(OpaqueTypeView(type));
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
}
// A view must hash identically to itself and to the owning type.
TEST_P(OpaqueTypeViewTest, Hash) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(absl::HashOf(OpaqueTypeView(type)),
            absl::HashOf(OpaqueTypeView(type)));
  EXPECT_EQ(absl::HashOf(OpaqueTypeView(type)), absl::HashOf(OpaqueType(type)));
}
// Equality holds across every view/owner/TypeView/Type wrapping combination.
// Fix: removed two assertions that were exact byte-for-byte duplicates of
// earlier lines in the same test (`OpaqueType == OpaqueTypeView` appeared
// three times); the remaining set still covers every distinct combination.
TEST_P(OpaqueTypeViewTest, Equal) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type), OpaqueTypeView(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), OpaqueTypeView(type));
  EXPECT_EQ(OpaqueTypeView(type), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(OpaqueTypeView(type), OpaqueType(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), OpaqueType(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), Type(OpaqueType(type)));
  EXPECT_EQ(OpaqueType(type), OpaqueTypeView(type));
  EXPECT_EQ(OpaqueType(type), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(Type(OpaqueType(type)), TypeView(OpaqueTypeView(type)));
}
// RTTI-style identification resolves to OpaqueTypeView through TypeView too.
TEST_P(OpaqueTypeViewTest, NativeTypeId) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(NativeTypeId::Of(OpaqueTypeView(type)),
            NativeTypeId::For<OpaqueTypeView>());
  EXPECT_EQ(NativeTypeId::Of(TypeView(OpaqueTypeView(type))),
            NativeTypeId::For<OpaqueTypeView>());
}
TEST_P(OpaqueTypeViewTest, InstanceOf) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_TRUE(InstanceOf<OpaqueTypeView>(OpaqueTypeView(type)));
  EXPECT_TRUE(InstanceOf<OpaqueTypeView>(TypeView(OpaqueTypeView(type))));
}
TEST_P(OpaqueTypeViewTest, Cast) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_THAT(Cast<OpaqueTypeView>(OpaqueTypeView(type)), An<OpaqueTypeView>());
  EXPECT_THAT(Cast<OpaqueTypeView>(TypeView(OpaqueTypeView(type))),
              An<OpaqueTypeView>());
}
TEST_P(OpaqueTypeViewTest, As) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_THAT(As<OpaqueTypeView>(OpaqueTypeView(type)), Ne(absl::nullopt));
  EXPECT_THAT(As<OpaqueTypeView>(TypeView(OpaqueTypeView(type))),
              Ne(absl::nullopt));
}
// Run every test under both memory-management strategies.
INSTANTIATE_TEST_SUITE_P(
    OpaqueTypeViewTest, OpaqueTypeViewTest,
    ::testing::Values(MemoryManagement::kPooling,
                      MemoryManagement::kReferenceCounting),
    OpaqueTypeViewTest::ToString);
}
} | std::string OpaqueTypeView::DebugString() const {
return OpaqueDebugString(name(), parameters());
} | TEST_P(OpaqueTypeViewTest, DebugString) {
auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
{
std::ostringstream out;
out << OpaqueTypeView(type);
EXPECT_EQ(out.str(), "test.Opaque<bytes>");
}
{
std::ostringstream out;
out << TypeView(OpaqueTypeView(type));
EXPECT_EQ(out.str(), "test.Opaque<bytes>");
}
} |
#include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators::while_loop_impl {
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::Placeholder;
// Rewrites `expr` so that every maximal subexpression that depends on leaves
// but on no placeholders is replaced by a fresh placeholder, returning the
// rewritten expression plus a map from generated placeholder names to the
// extracted ("immutable") subexpressions. `immutable_naming_function`
// supplies the placeholder name for each extracted node.
absl::StatusOr<std::pair<ExprNodePtr, NamedExpressions>> ExtractImmutables(
    const ExprNodePtr& expr, std::function<std::string(const ExprNodePtr& node)>
                                 immutable_naming_function) {
  NamedExpressions immutables;
  // Per-node traversal result: the (possibly rewritten) node plus whether its
  // subtree transitively depends on any placeholder and/or any leaf.
  struct Visit {
    ExprNodePtr expr;
    bool has_placeholder_dep;
    bool has_leaf_dep;
  };
  ASSIGN_OR_RETURN(
      (auto [converted_expr, has_placeholder_dep, has_leaf_dep]),
      expr::PostOrderTraverse(
          expr,
          [&](const ExprNodePtr& node,
              absl::Span<const Visit* const> visits) -> absl::StatusOr<Visit> {
            if (node->is_placeholder()) {
              return Visit{.expr = node,
                           .has_placeholder_dep = true,
                           .has_leaf_dep = false};
            }
            if (node->is_leaf()) {
              return Visit{.expr = node,
                           .has_placeholder_dep = false,
                           .has_leaf_dep = true};
            }
            bool has_placeholder_dep = std::any_of(
                visits.begin(), visits.end(),
                [](const auto& v) { return v->has_placeholder_dep; });
            bool has_leaf_dep =
                std::any_of(visits.begin(), visits.end(),
                            [](const auto& v) { return v->has_leaf_dep; });
            if (!has_placeholder_dep) {
              // Placeholder-free subtree: keep it intact; an ancestor with a
              // placeholder (or the final fixup below) extracts it whole.
              return Visit{.expr = node,
                           .has_placeholder_dep = false,
                           .has_leaf_dep = has_leaf_dep};
            }
            // This node mixes placeholder and non-placeholder children:
            // extract each child that has leaf deps but no placeholder deps,
            // replacing it with a named placeholder.
            std::vector<ExprNodePtr> new_deps;
            new_deps.reserve(visits.size());
            for (const auto& visit : visits) {
              if (visit->has_placeholder_dep || !visit->has_leaf_dep) {
                new_deps.push_back(visit->expr);
              } else {
                auto placeholder_key = immutable_naming_function(visit->expr);
                new_deps.emplace_back(Placeholder(placeholder_key));
                immutables.emplace(std::move(placeholder_key), visit->expr);
              }
            }
            ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies(
                                                node, std::move(new_deps)));
            return Visit{.expr = new_node,
                         .has_placeholder_dep = true,
                         .has_leaf_dep = has_leaf_dep};
          }));
  if (!has_placeholder_dep) {
    // The entire expression contains no placeholders: extract it wholesale
    // (nothing can have been extracted during the traversal in that case).
    DCHECK(immutables.empty());
    auto placeholder_key = immutable_naming_function(converted_expr);
    immutables.emplace(placeholder_key, converted_expr);
    converted_expr = Placeholder(placeholder_key);
  }
  return {{std::move(converted_expr), std::move(immutables)}};
}
} | #include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr_operators::while_loop_impl {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Test fixture: initializes the Arolla environment before each test so that
// operators such as math.add are registered.
class WhileLoopImplTest : public ::testing::Test {
 protected:
  void SetUp() override { ASSERT_OK(InitArolla()); }
};
// End-to-end check of ExtractImmutables over representative expressions.
// The naming function memoizes by fingerprint, so repeated identical
// subexpressions (e.g. Leaf("seven")) share one placeholder name across calls.
TEST_F(WhileLoopImplTest, ExtractImmutables) {
  absl::flat_hash_map<Fingerprint, std::string> immutable_names;
  auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {
    if (auto it = immutable_names.find(node->fingerprint());
        it != immutable_names.end()) {
      return it->second;
    }
    std::string name = absl::StrFormat("_immutable_%d", immutable_names.size());
    immutable_names.emplace(node->fingerprint(), name);
    return name;
  };
  {
    // A bare literal has no placeholder deps: extracted wholesale.
    auto expr = Literal(int64_t{1});
    EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
                IsOkAndHolds(Pair(
                    EqualsExpr(Placeholder("_immutable_0")),
                    UnorderedElementsAre(Pair(
                        "_immutable_0", EqualsExpr(Literal<int64_t>(1)))))));
  }
  {
    // A bare leaf is likewise extracted wholesale.
    auto expr = Leaf("fifty");
    EXPECT_THAT(
        ExtractImmutables(expr, immutable_naming_function),
        IsOkAndHolds(Pair(EqualsExpr(Placeholder("_immutable_1")),
                          UnorderedElementsAre(Pair(
                              "_immutable_1", EqualsExpr(Leaf("fifty")))))));
  }
  {
    // A bare placeholder is mutable: nothing to extract.
    auto expr = Placeholder("seven");
    EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
                IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty())));
  }
  {
    // Mixed tree: each leaf-only child is replaced by its own placeholder.
    ASSERT_OK_AND_ASSIGN(
        auto expr,
        CallOp("math.add",
               {Leaf("two"),
                CallOp("math.add", {Placeholder("fifty"), Leaf("seven")})}));
    EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
                IsOkAndHolds(Pair(
                    EqualsExpr(CallOp(
                        "math.add",
                        {Placeholder("_immutable_3"),
                         CallOp("math.add", {Placeholder("fifty"),
                                             Placeholder("_immutable_2")})})),
                    UnorderedElementsAre(
                        Pair("_immutable_3", EqualsExpr(Leaf("two"))),
                        Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
  }
  {
    // Placeholder + literal only: literal children are left inline.
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Placeholder("fifty"),
                                                        Literal<int64_t>(7)}));
    EXPECT_THAT(
        ExtractImmutables(expr, immutable_naming_function),
        IsOkAndHolds(Pair(EqualsExpr(CallOp("math.add", {Placeholder("fifty"),
                                                         Literal<int64_t>(7)})),
                          IsEmpty())));
  }
  {
    // A whole leaf-depending subtree is extracted as a single immutable.
    ASSERT_OK_AND_ASSIGN(
        auto expr57, CallOp("math.add", {Leaf("fifty"), Literal<int64_t>(7)}));
    ASSERT_OK_AND_ASSIGN(auto expr,
                         CallOp("math.add", {expr57, Placeholder("two")}));
    EXPECT_THAT(
        ExtractImmutables(expr, immutable_naming_function),
        IsOkAndHolds(Pair(
            EqualsExpr(CallOp(
                "math.add", {Placeholder("_immutable_4"), Placeholder("two")})),
            UnorderedElementsAre(Pair("_immutable_4", EqualsExpr(expr57))))));
  }
  {
    // The same leaf appearing twice maps to one shared placeholder name.
    ASSERT_OK_AND_ASSIGN(
        auto expr,
        CallOp("math.add",
               {CallOp("math.add", {Placeholder("fifty"), Leaf("seven")}),
                Leaf("seven")}));
    EXPECT_THAT(
        ExtractImmutables(expr, immutable_naming_function),
        IsOkAndHolds(Pair(
            EqualsExpr(CallOp(
                "math.add", {CallOp("math.add", {Placeholder("fifty"),
                                                 Placeholder("_immutable_2")}),
                             Placeholder("_immutable_2")})),
            UnorderedElementsAre(
                Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
  }
  {
    // A literal+leaf subtree (no placeholders) is extracted as one unit.
    ASSERT_OK_AND_ASSIGN(
        auto expr,
        CallOp("math.add",
               {CallOp("math.add", {Literal<int64_t>(1), Leaf("fifty")}),
                Placeholder("seven")}));
    EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
                IsOkAndHolds(Pair(
                    EqualsExpr(CallOp("math.add", {Placeholder("_immutable_5"),
                                                   Placeholder("seven")})),
                    UnorderedElementsAre(Pair(
                        "_immutable_5",
                        EqualsExpr(CallOp("math.add", {Literal<int64_t>(1),
                                                       Leaf("fifty")})))))));
  }
}
}
} | absl::StatusOr<std::pair<ExprNodePtr, NamedExpressions>> ExtractImmutables(
const ExprNodePtr& expr, std::function<std::string(const ExprNodePtr& node)>
immutable_naming_function) {
NamedExpressions immutables;
struct Visit {
ExprNodePtr expr;
bool has_placeholder_dep;
bool has_leaf_dep;
};
ASSIGN_OR_RETURN(
(auto [converted_expr, has_placeholder_dep, has_leaf_dep]),
expr::PostOrderTraverse(
expr,
[&](const ExprNodePtr& node,
absl::Span<const Visit* const> visits) -> absl::StatusOr<Visit> {
if (node->is_placeholder()) {
return Visit{.expr = node,
.has_placeholder_dep = true,
.has_leaf_dep = false};
}
if (node->is_leaf()) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = true};
}
bool has_placeholder_dep = std::any_of(
visits.begin(), visits.end(),
[](const auto& v) { return v->has_placeholder_dep; });
bool has_leaf_dep =
std::any_of(visits.begin(), visits.end(),
[](const auto& v) { return v->has_leaf_dep; });
if (!has_placeholder_dep) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = has_leaf_dep};
}
std::vector<ExprNodePtr> new_deps;
new_deps.reserve(visits.size());
for (const auto& visit : visits) {
if (visit->has_placeholder_dep || !visit->has_leaf_dep) {
new_deps.push_back(visit->expr);
} else {
auto placeholder_key = immutable_naming_function(visit->expr);
new_deps.emplace_back(Placeholder(placeholder_key));
immutables.emplace(std::move(placeholder_key), visit->expr);
}
}
ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies(
node, std::move(new_deps)));
return Visit{.expr = new_node,
.has_placeholder_dep = true,
.has_leaf_dep = has_leaf_dep};
}));
if (!has_placeholder_dep) {
DCHECK(immutables.empty());
auto placeholder_key = immutable_naming_function(converted_expr);
immutables.emplace(placeholder_key, converted_expr);
converted_expr = Placeholder(placeholder_key);
}
return {{std::move(converted_expr), std::move(immutables)}};
} | TEST_F(WhileLoopImplTest, ExtractImmutables) {
absl::flat_hash_map<Fingerprint, std::string> immutable_names;
auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {
if (auto it = immutable_names.find(node->fingerprint());
it != immutable_names.end()) {
return it->second;
}
std::string name = absl::StrFormat("_immutable_%d", immutable_names.size());
immutable_names.emplace(node->fingerprint(), name);
return name;
};
{
auto expr = Literal(int64_t{1});
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(Placeholder("_immutable_0")),
UnorderedElementsAre(Pair(
"_immutable_0", EqualsExpr(Literal<int64_t>(1)))))));
}
{
auto expr = Leaf("fifty");
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(Placeholder("_immutable_1")),
UnorderedElementsAre(Pair(
"_immutable_1", EqualsExpr(Leaf("fifty")))))));
}
{
auto expr = Placeholder("seven");
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{Leaf("two"),
CallOp("math.add", {Placeholder("fifty"), Leaf("seven")})}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add",
{Placeholder("_immutable_3"),
CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")})})),
UnorderedElementsAre(
Pair("_immutable_3", EqualsExpr(Leaf("two"))),
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)})),
IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr57, CallOp("math.add", {Leaf("fifty"), Literal<int64_t>(7)}));
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("math.add", {expr57, Placeholder("two")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {Placeholder("_immutable_4"), Placeholder("two")})),
UnorderedElementsAre(Pair("_immutable_4", EqualsExpr(expr57))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Placeholder("fifty"), Leaf("seven")}),
Leaf("seven")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")}),
Placeholder("_immutable_2")})),
UnorderedElementsAre(
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Literal<int64_t>(1), Leaf("fifty")}),
Placeholder("seven")}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp("math.add", {Placeholder("_immutable_5"),
Placeholder("seven")})),
UnorderedElementsAre(Pair(
"_immutable_5",
EqualsExpr(CallOp("math.add", {Literal<int64_t>(1),
Leaf("fifty")})))))));
}
} |
#ifndef TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#define TENSORFLOW_TSL_LIB_GTL_INT_TYPE_H_
#include <stddef.h>

#include <functional>
#include <iosfwd>
#include <ostream>
#include <type_traits>
#include <unordered_map>
#include <utility>

#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
template <typename IntTypeName, typename _ValueType>
class IntType;
#define TSL_LIB_GTL_DEFINE_INT_TYPE(int_type_name, value_type) \
struct int_type_name##_tag_ {}; \
typedef ::tsl::gtl::IntType<int_type_name##_tag_, value_type> int_type_name;
// IntType<Tag, Value>: a strongly-typed integer wrapper. Distinct tag types
// produce mutually incompatible integer types, preventing accidental mixing
// of semantically different quantities (see TSL_LIB_GTL_DEFINE_INT_TYPE).
template <typename IntTypeName, typename _ValueType>
class IntType {
 public:
  typedef _ValueType ValueType;
  typedef IntType<IntTypeName, ValueType> ThisType;
  // Hash functor for use with std::unordered_map/std::unordered_set.
  struct Hasher {
    size_t operator()(const IntType& arg) const {
      return static_cast<size_t>(arg.value());
    }
  };
  // Abseil hashing support; hashes the underlying integer value.
  template <typename H>
  friend H AbslHashValue(H h, const IntType& i) {
    return H::combine(std::move(h), i.value());
  }

 public:
  // Default-constructs to zero; construction from a raw value is explicit so
  // plain integers cannot silently become IntTypes.
  constexpr IntType() : value_(0) {}
  constexpr explicit IntType(ValueType value) : value_(value) {}
  constexpr ValueType value() const { return value_; }
  // Returns the value converted (static_cast) to the requested type.
  template <typename ValType>
  constexpr ValType value() const {
    return static_cast<ValType>(value_);
  }
  ThisType& operator++() {
    ++value_;
    return *this;
  }
  // Post-increment: returns the previous value by copy.
  const ThisType operator++(int v) {
    ThisType temp(*this);
    ++value_;
    return temp;
  }
  ThisType& operator--() {
    --value_;
    return *this;
  }
  // Post-decrement: returns the previous value by copy.
  const ThisType operator--(int v) {
    ThisType temp(*this);
    --value_;
    return temp;
  }
  constexpr bool operator!() const { return value_ == 0; }
  constexpr const ThisType operator+() const { return ThisType(value_); }
  constexpr const ThisType operator-() const { return ThisType(-value_); }
  constexpr const ThisType operator~() const { return ThisType(~value_); }
// Generates a compound-assignment operator accepting either another IntType
// of the same tag or a raw ValueType.
#define INT_TYPE_ASSIGNMENT_OP(op)                   \
  ThisType& operator op(const ThisType& arg_value) { \
    value_ op arg_value.value();                     \
    return *this;                                    \
  }                                                  \
  ThisType& operator op(ValueType arg_value) {       \
    value_ op arg_value;                             \
    return *this;                                    \
  }
  INT_TYPE_ASSIGNMENT_OP(+=);
  INT_TYPE_ASSIGNMENT_OP(-=);
  INT_TYPE_ASSIGNMENT_OP(*=);
  INT_TYPE_ASSIGNMENT_OP(/=);
  INT_TYPE_ASSIGNMENT_OP(<<=);
  INT_TYPE_ASSIGNMENT_OP(>>=);
  INT_TYPE_ASSIGNMENT_OP(%=);
#undef INT_TYPE_ASSIGNMENT_OP
  // Assignment from a raw value is allowed (unlike implicit construction).
  ThisType& operator=(ValueType arg_value) {
    value_ = arg_value;
    return *this;
  }

 private:
  ValueType value_;
  static_assert(std::is_integral<ValueType>::value, "invalid integer type");
} TF_PACKED;
// Streams an IntType exactly like its underlying integer value.
template <typename IntTypeName, typename ValueType>
std::ostream& operator<<(std::ostream& os,
                         IntType<IntTypeName, ValueType> arg) {
  os << arg.value();
  return os;
}
// Generates an arithmetic operator in three symmetric overloads —
// IntType op IntType, IntType op raw value, raw value op IntType — each
// returning a new IntType wrapping the raw result.
#define INT_TYPE_ARITHMETIC_OP(op)                                        \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      IntType<IntTypeName, ValueType> id_1,                               \
      IntType<IntTypeName, ValueType> id_2) {                             \
    return IntType<IntTypeName, ValueType>(id_1.value() op id_2.value()); \
  }                                                                       \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      IntType<IntTypeName, ValueType> id,                                 \
      typename IntType<IntTypeName, ValueType>::ValueType arg_val) {      \
    return IntType<IntTypeName, ValueType>(id.value() op arg_val);        \
  }                                                                       \
  template <typename IntTypeName, typename ValueType>                     \
  static inline constexpr IntType<IntTypeName, ValueType> operator op(    \
      typename IntType<IntTypeName, ValueType>::ValueType arg_val,        \
      IntType<IntTypeName, ValueType> id) {                               \
    return IntType<IntTypeName, ValueType>(arg_val op id.value());        \
  }
INT_TYPE_ARITHMETIC_OP(+);
INT_TYPE_ARITHMETIC_OP(-);
INT_TYPE_ARITHMETIC_OP(*);
INT_TYPE_ARITHMETIC_OP(/);
INT_TYPE_ARITHMETIC_OP(<<);
INT_TYPE_ARITHMETIC_OP(>>);
INT_TYPE_ARITHMETIC_OP(%);
#undef INT_TYPE_ARITHMETIC_OP
// Generates a comparison operator in three symmetric overloads —
// IntType op IntType, IntType op raw value, raw value op IntType — each
// comparing the underlying values.
#define INT_TYPE_COMPARISON_OP(op)                               \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      IntType<IntTypeName, ValueType> id_1,                      \
      IntType<IntTypeName, ValueType> id_2) {                    \
    return id_1.value() op id_2.value();                         \
  }                                                              \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      IntType<IntTypeName, ValueType> id,                        \
      typename IntType<IntTypeName, ValueType>::ValueType val) { \
    return id.value() op val;                                    \
  }                                                              \
  template <typename IntTypeName, typename ValueType>            \
  static inline constexpr bool operator op(                      \
      typename IntType<IntTypeName, ValueType>::ValueType val,   \
      IntType<IntTypeName, ValueType> id) {                      \
    return val op id.value();                                    \
  }
INT_TYPE_COMPARISON_OP(==);
INT_TYPE_COMPARISON_OP(!=);
INT_TYPE_COMPARISON_OP(<);
INT_TYPE_COMPARISON_OP(<=);
INT_TYPE_COMPARISON_OP(>);
INT_TYPE_COMPARISON_OP(>=);
#undef INT_TYPE_COMPARISON_OP
}
}
#endif | #include "tsl/lib/gtl/int_type.h"
#include <memory>
#include <unordered_map>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
// One strongly-typed IntType wrapper per underlying integer width, used as
// the type list for the typed tests below.
TSL_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
TSL_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64_t);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
TSL_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
TSL_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long);
// Empty fixture; every test is instantiated once per type in
// SupportedIntTypes.
template <typename IntType_Type>
class IntTypeTest : public ::testing::Test {};
// NOTE(review): UInt32_IT is defined above but not listed here — presumably
// intentional, but worth confirming.
typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
                         Int64_IT, UInt64_IT, Long_IT>
    SupportedIntTypes;
TYPED_TEST_SUITE(IntTypeTest, SupportedIntTypes);
// Default construction yields zero; value and copy construction preserve the
// wrapped value. All three forms must be usable in constexpr context.
TYPED_TEST(IntTypeTest, TestInitialization) {
  constexpr TypeParam a;
  constexpr TypeParam b(1);
  constexpr TypeParam c(b);
  EXPECT_EQ(0, a);  // default-constructed value is zero-initialized
  EXPECT_EQ(1, b);
  EXPECT_EQ(1, c);
}
// Exhaustive operator coverage: increment/decrement, unary ops, assignment,
// compound assignment (IntType and raw rhs), all comparisons, and all binary
// arithmetic ops in each of the three overload shapes. constexpr behavior is
// pinned via static_assert on d and e. The assertions are order-dependent:
// each compound assignment relies on the value left by the previous one.
TYPED_TEST(IntTypeTest, TestOperators) {
  TypeParam a(0);
  TypeParam b(1);
  TypeParam c(2);
  constexpr TypeParam d(3);
  constexpr TypeParam e(4);
  // Post/pre increment and decrement round-trip a back to 0.
  EXPECT_EQ(0, (a++).value());
  EXPECT_EQ(2, (++a).value());
  EXPECT_EQ(2, (a--).value());
  EXPECT_EQ(0, (--a).value());
  // Unary !, +, -, ~ forward to the wrapped value.
  EXPECT_EQ(true, !a);
  EXPECT_EQ(false, !b);
  static_assert(!d == false, "Unary operator! failed");
  EXPECT_EQ(a.value(), +a);
  static_assert(+d == d.value(), "Unary operator+ failed");
  EXPECT_EQ(-a.value(), -a);
  static_assert(-d == -d.value(), "Unary operator- failed");
  EXPECT_EQ(~a.value(), ~a);
  EXPECT_EQ(~b.value(), ~b);
  static_assert(~d == ~d.value(), "Unary operator~ failed");
  // Assignment chains return the assigned value.
  c = a = b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = b = 2;
  EXPECT_EQ(2, b.value());
  EXPECT_EQ(2, c.value());
  // Compound assignment with an IntType right-hand side.
  c = a += b;
  EXPECT_EQ(3, a.value());
  EXPECT_EQ(3, c.value());
  c = a -= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a *= b;
  EXPECT_EQ(2, a.value());
  EXPECT_EQ(2, c.value());
  c = a /= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a <<= b;
  EXPECT_EQ(4, a.value());
  EXPECT_EQ(4, c.value());
  c = a >>= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a %= b;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  // Compound assignment with a raw integer right-hand side.
  c = a += 2;
  EXPECT_EQ(3, a.value());
  EXPECT_EQ(3, c.value());
  c = a -= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a *= 2;
  EXPECT_EQ(2, a.value());
  EXPECT_EQ(2, c.value());
  c = a /= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a <<= 2;
  EXPECT_EQ(4, a.value());
  EXPECT_EQ(4, c.value());
  c = a >>= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  c = a %= 2;
  EXPECT_EQ(1, a.value());
  EXPECT_EQ(1, c.value());
  // Comparisons: IntType vs IntType, IntType vs raw, raw vs IntType.
  a = 0;
  b = 1;
  EXPECT_FALSE(a == b);
  EXPECT_TRUE(a == 0);
  EXPECT_FALSE(1 == a);
  static_assert(d == d, "operator== failed");
  static_assert(d == 3, "operator== failed");
  static_assert(3 == d, "operator== failed");
  EXPECT_TRUE(a != b);
  EXPECT_TRUE(a != 1);
  EXPECT_FALSE(0 != a);
  static_assert(d != e, "operator!= failed");
  static_assert(d != 4, "operator!= failed");
  static_assert(4 != d, "operator!= failed");
  EXPECT_TRUE(a < b);
  EXPECT_TRUE(a < 1);
  EXPECT_FALSE(0 < a);
  static_assert(d < e, "operator< failed");
  static_assert(d < 4, "operator< failed");
  static_assert(3 < e, "operator< failed");
  EXPECT_TRUE(a <= b);
  EXPECT_TRUE(a <= 1);
  EXPECT_TRUE(0 <= a);
  static_assert(d <= e, "operator<= failed");
  static_assert(d <= 4, "operator<= failed");
  static_assert(3 <= e, "operator<= failed");
  EXPECT_FALSE(a > b);
  EXPECT_FALSE(a > 1);
  EXPECT_FALSE(0 > a);
  static_assert(e > d, "operator> failed");
  static_assert(e > 3, "operator> failed");
  static_assert(4 > d, "operator> failed");
  EXPECT_FALSE(a >= b);
  EXPECT_FALSE(a >= 1);
  EXPECT_TRUE(0 >= a);
  static_assert(e >= d, "operator>= failed");
  static_assert(e >= 3, "operator>= failed");
  static_assert(4 >= d, "operator>= failed");
  // Binary arithmetic, each in its three overload shapes.
  a = 1;
  b = 3;
  EXPECT_EQ(4, (a + b).value());
  EXPECT_EQ(4, (a + 3).value());
  EXPECT_EQ(4, (1 + b).value());
  static_assert((d + e).value() == 7, "Binary operator+ failed");
  static_assert((d + 4).value() == 7, "Binary operator+ failed");
  static_assert((3 + e).value() == 7, "Binary operator+ failed");
  EXPECT_EQ(2, (b - a).value());
  EXPECT_EQ(2, (b - 1).value());
  EXPECT_EQ(2, (3 - a).value());
  static_assert((e - d).value() == 1, "Binary operator- failed");
  static_assert((e - 3).value() == 1, "Binary operator- failed");
  static_assert((4 - d).value() == 1, "Binary operator- failed");
  EXPECT_EQ(3, (a * b).value());
  EXPECT_EQ(3, (a * 3).value());
  EXPECT_EQ(3, (1 * b).value());
  static_assert((d * e).value() == 12, "Binary operator* failed");
  static_assert((d * 4).value() == 12, "Binary operator* failed");
  static_assert((3 * e).value() == 12, "Binary operator* failed");
  EXPECT_EQ(0, (a / b).value());
  EXPECT_EQ(0, (a / 3).value());
  EXPECT_EQ(0, (1 / b).value());
  static_assert((d / e).value() == 0, "Binary operator/ failed");
  static_assert((d / 4).value() == 0, "Binary operator/ failed");
  static_assert((3 / e).value() == 0, "Binary operator/ failed");
  EXPECT_EQ(8, (a << b).value());
  EXPECT_EQ(8, (a << 3).value());
  EXPECT_EQ(8, (1 << b).value());
  static_assert((d << e).value() == 48, "Binary operator<< failed");
  static_assert((d << 4).value() == 48, "Binary operator<< failed");
  static_assert((3 << e).value() == 48, "Binary operator<< failed");
  b = 8;
  EXPECT_EQ(4, (b >> a).value());
  EXPECT_EQ(4, (b >> 1).value());
  EXPECT_EQ(4, (8 >> a).value());
  static_assert((d >> e).value() == 0, "Binary operator>> failed");
  static_assert((d >> 4).value() == 0, "Binary operator>> failed");
  static_assert((3 >> e).value() == 0, "Binary operator>> failed");
  b = 3;
  a = 2;
  EXPECT_EQ(1, (b % a).value());
  EXPECT_EQ(1, (b % 2).value());
  EXPECT_EQ(1, (3 % a).value());
  static_assert((e % d).value() == 1, "Binary operator% failed");
  static_assert((e % 3).value() == 1, "Binary operator% failed");
  static_assert((4 % d).value() == 1, "Binary operator% failed");
}
// IntType::Hasher must be usable as an unordered_map hash and must hash
// equal values identically.
TYPED_TEST(IntTypeTest, TestHashFunctor) {
  std::unordered_map<TypeParam, char, typename TypeParam::Hasher> map;
  TypeParam a(0);
  map[a] = 'c';
  EXPECT_EQ('c', map[a]);
  map[++a] = 'o';
  EXPECT_EQ('o', map[a]);
  TypeParam b(a);
  // Equal keys hash equally.
  EXPECT_EQ(typename TypeParam::Hasher()(a), typename TypeParam::Hasher()(b));
}
// value() returns the wrapped value unchanged; value<T>() behaves like a
// static_cast to T. Both must work in constexpr context.
TYPED_TEST(IntTypeTest, TestValueAccessor) {
  constexpr typename TypeParam::ValueType i = -1;
  constexpr TypeParam int_type(i);
  EXPECT_EQ(i, int_type.value());
  static_assert(int_type.value() == i, "value() failed");
  // Converting accessor mirrors static_cast for every integral target width.
  EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
  EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
  EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
  EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
  EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
  EXPECT_EQ(static_cast<int64_t>(i), int_type.template value<int64_t>());
  EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
  EXPECT_EQ(static_cast<long>(i), int_type.template value<long>());
  static_assert(int_type.template value<int>() == static_cast<int>(i),
                "value<Value>() failed");
}
// IntType must be movable: a move-only aggregate containing one compiles and
// round-trips its value through factory return and move-assignment.
TYPED_TEST(IntTypeTest, TestMove) {
  // Aggregate is move-only because of the unique_ptr member.
  struct NotCopyable {
    TypeParam inttype;
    std::unique_ptr<int> ptr;
    static NotCopyable Make(int i) {
      NotCopyable f;
      f.inttype = TypeParam(i);
      f.ptr.reset(new int(i));
      return f;
    }
  };
  NotCopyable foo = NotCopyable::Make(123);
  EXPECT_EQ(123, foo.inttype);
  EXPECT_EQ(123, *foo.ptr);
  foo = NotCopyable::Make(321);  // move-assignment
  EXPECT_EQ(321, foo.inttype);
  EXPECT_EQ(321, *foo.ptr);
}
} | INT_TYPE_ASSIGNMENT_OP(-=);
INT_TYPE_ASSIGNMENT_OP(*=);
INT_TYPE_ASSIGNMENT_OP(/=);
INT_TYPE_ASSIGNMENT_OP(<<=);
INT_TYPE_ASSIGNMENT_OP(>>=);
INT_TYPE_ASSIGNMENT_OP(%=);
#undef INT_TYPE_ASSIGNMENT_OP
ThisType& operator=(ValueType arg_value) {
value_ = arg_value;
return *this;
} | TYPED_TEST(IntTypeTest, TestOperators) {
TypeParam a(0);
TypeParam b(1);
TypeParam c(2);
constexpr TypeParam d(3);
constexpr TypeParam e(4);
EXPECT_EQ(0, (a++).value());
EXPECT_EQ(2, (++a).value());
EXPECT_EQ(2, (a--).value());
EXPECT_EQ(0, (--a).value());
EXPECT_EQ(true, !a);
EXPECT_EQ(false, !b);
static_assert(!d == false, "Unary operator! failed");
EXPECT_EQ(a.value(), +a);
static_assert(+d == d.value(), "Unary operator+ failed");
EXPECT_EQ(-a.value(), -a);
static_assert(-d == -d.value(), "Unary operator- failed");
EXPECT_EQ(~a.value(), ~a);
EXPECT_EQ(~b.value(), ~b);
static_assert(~d == ~d.value(), "Unary operator~ failed");
c = a = b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = b = 2;
EXPECT_EQ(2, b.value());
EXPECT_EQ(2, c.value());
c = a += b;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= b;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= b;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= b;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a += 2;
EXPECT_EQ(3, a.value());
EXPECT_EQ(3, c.value());
c = a -= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a *= 2;
EXPECT_EQ(2, a.value());
EXPECT_EQ(2, c.value());
c = a /= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a <<= 2;
EXPECT_EQ(4, a.value());
EXPECT_EQ(4, c.value());
c = a >>= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
c = a %= 2;
EXPECT_EQ(1, a.value());
EXPECT_EQ(1, c.value());
a = 0;
b = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a == 0);
EXPECT_FALSE(1 == a);
static_assert(d == d, "operator== failed");
static_assert(d == 3, "operator== failed");
static_assert(3 == d, "operator== failed");
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != 1);
EXPECT_FALSE(0 != a);
static_assert(d != e, "operator!= failed");
static_assert(d != 4, "operator!= failed");
static_assert(4 != d, "operator!= failed");
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < 1);
EXPECT_FALSE(0 < a);
static_assert(d < e, "operator< failed");
static_assert(d < 4, "operator< failed");
static_assert(3 < e, "operator< failed");
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= 1);
EXPECT_TRUE(0 <= a);
static_assert(d <= e, "operator<= failed");
static_assert(d <= 4, "operator<= failed");
static_assert(3 <= e, "operator<= failed");
EXPECT_FALSE(a > b);
EXPECT_FALSE(a > 1);
EXPECT_FALSE(0 > a);
static_assert(e > d, "operator> failed");
static_assert(e > 3, "operator> failed");
static_assert(4 > d, "operator> failed");
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= 1);
EXPECT_TRUE(0 >= a);
static_assert(e >= d, "operator>= failed");
static_assert(e >= 3, "operator>= failed");
static_assert(4 >= d, "operator>= failed");
a = 1;
b = 3;
EXPECT_EQ(4, (a + b).value());
EXPECT_EQ(4, (a + 3).value());
EXPECT_EQ(4, (1 + b).value());
static_assert((d + e).value() == 7, "Binary operator+ failed");
static_assert((d + 4).value() == 7, "Binary operator+ failed");
static_assert((3 + e).value() == 7, "Binary operator+ failed");
EXPECT_EQ(2, (b - a).value());
EXPECT_EQ(2, (b - 1).value());
EXPECT_EQ(2, (3 - a).value());
static_assert((e - d).value() == 1, "Binary operator- failed");
static_assert((e - 3).value() == 1, "Binary operator- failed");
static_assert((4 - d).value() == 1, "Binary operator- failed");
EXPECT_EQ(3, (a * b).value());
EXPECT_EQ(3, (a * 3).value());
EXPECT_EQ(3, (1 * b).value());
static_assert((d * e).value() == 12, "Binary operator* failed");
static_assert((d * 4).value() == 12, "Binary operator* failed");
static_assert((3 * e).value() == 12, "Binary operator* failed");
EXPECT_EQ(0, (a / b).value());
EXPECT_EQ(0, (a / 3).value());
EXPECT_EQ(0, (1 / b).value());
static_assert((d / e).value() == 0, "Binary operator/ failed");
static_assert((d / 4).value() == 0, "Binary operator/ failed");
static_assert((3 / e).value() == 0, "Binary operator/ failed");
EXPECT_EQ(8, (a << b).value());
EXPECT_EQ(8, (a << 3).value());
EXPECT_EQ(8, (1 << b).value());
static_assert((d << e).value() == 48, "Binary operator<< failed");
static_assert((d << 4).value() == 48, "Binary operator<< failed");
static_assert((3 << e).value() == 48, "Binary operator<< failed");
b = 8;
EXPECT_EQ(4, (b >> a).value());
EXPECT_EQ(4, (b >> 1).value());
EXPECT_EQ(4, (8 >> a).value());
static_assert((d >> e).value() == 0, "Binary operator>> failed");
static_assert((d >> 4).value() == 0, "Binary operator>> failed");
static_assert((3 >> e).value() == 0, "Binary operator>> failed");
b = 3;
a = 2;
EXPECT_EQ(1, (b % a).value());
EXPECT_EQ(1, (b % 2).value());
EXPECT_EQ(1, (3 % a).value());
static_assert((e % d).value() == 1, "Binary operator% failed");
static_assert((e % 3).value() == 1, "Binary operator% failed");
static_assert((4 % d).value() == 1, "Binary operator% failed");
} |
#include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
// 3x3 row-major YUV->RGB conversion matrix passed to ConvertColorSpace.
// Coefficients appear to match the ITU-R BT.601 inverse transform —
// TODO confirm against the RGB->YUV kernel used by the forward algo.
constexpr float kYuv2RgbKernel[] = {1.0f, 0.0f,
                                    1.13988303f,
                                    1.0f, -0.394642334f,
                                    -0.58062185f,
                                    1.0f, 2.03206185f, 0.0f};
// Element count (9), forwarded alongside the kernel pointer.
constexpr int kYuv2RgbKernelDim =
    sizeof(kYuv2RgbKernel) / sizeof(kYuv2RgbKernel[0]);
void ComputeYuvToRgb(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kYuv2RgbKernel[0], kYuv2RgbKernelDim);
}
}
// Returns the singleton Algo descriptor for YUV->RGB conversion. The static
// local is immutable, so the shared instance is safe to hand out repeatedly.
const Algo* Impl_YuvToRgb() {
  static const Algo yuv_to_rgb = {&ComputeYuvToRgb, nullptr};
  return &yuv_to_rgb;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
// One parameterized case: input tensor shape/data plus the expected output
// data and shape after YUV->RGB conversion.
struct YuvToRgbTestParams {
  const std::vector<dim_t> img_dims;        // input shape (b, h, w, c)
  const std::vector<float> img_data;        // flattened input pixels
  const std::vector<float> expected_data;   // flattened expected RGB pixels
  const std::vector<dim_t> expected_shape;  // expected output shape
};
class YuvToRgbTest : public ::testing::TestWithParam<YuvToRgbTestParams> {};
// Runs the YUV->RGB algo on the parameterized float input and compares every
// output element against the expected data within a 0.1 tolerance.
TEST_P(YuvToRgbTest, FloatPixelType) {
  constexpr float kAbsError = 0.1f;
  const YuvToRgbTestParams& params = GetParam();
  // Build the input buffer from the fixture data.
  OwningVectorRef img(etype_t::f32);
  img.Resize(dims_t(params.img_dims));
  ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
  std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
  // Run the algo; it resizes `output` itself.
  OwningVectorRef output(etype_t::f32);
  const Algo* yuv_to_rgb = Impl_YuvToRgb();
  yuv_to_rgb->process({&img}, {&output});
  ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
  ASSERT_EQ(output.Dims(), params.expected_shape);
  const float* out_data = reinterpret_cast<float*>(output.Data());
  for (int i = 0; i < output.NumElements(); ++i) {
    EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
        << "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
        << "] = " << params.expected_data[i];
  }
}
INSTANTIATE_TEST_SUITE_P(
YuvToRgbTests, YuvToRgbTest,
testing::ValuesIn({
YuvToRgbTestParams{{1, 3, 2, 3},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{1, 3, 2, 3}},
YuvToRgbTestParams{{2, 3, 2, 3},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{2, 3, 2, 3}},
}));
}
}
} | const Algo* Impl_YuvToRgb() {
static const Algo yuv_to_rgb = {&ComputeYuvToRgb, nullptr};
return &yuv_to_rgb;
} | TEST_P(YuvToRgbTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const YuvToRgbTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* yuv_to_rgb = Impl_YuvToRgb();
yuv_to_rgb->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
} |
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
namespace tensorflow {
namespace grappler {
// Convenience constructor: defaults to an OpLevelCostEstimator and the
// "FirstReady" node-scheduling policy.
VirtualCluster::VirtualCluster(
    const std::unordered_map<string, DeviceProperties>& devices)
    : VirtualCluster(devices, std::make_unique<OpLevelCostEstimator>(),
                     ReadyNodeManagerFactory("FirstReady")) {}
// Full constructor: takes ownership of the per-op estimator and the node
// manager and wraps them in an AnalyticalCostEstimator bound to this cluster.
VirtualCluster::VirtualCluster(
    const std::unordered_map<string, DeviceProperties>& devices,
    std::unique_ptr<OpLevelCostEstimator> node_estimator,
    std::unique_ptr<ReadyNodeManager> node_manager)
    : Cluster(0) {  // 0 — presumably "no timeout"; confirm in Cluster's ctor
  devices_ = devices;
  // Last two args are positional flags on AnalyticalCostEstimator
  // (true, false) — see its constructor for their meaning.
  estimator_ = std::make_unique<AnalyticalCostEstimator>(
      this, std::move(node_estimator), std::move(node_manager),
      true, false);
}
// Builds the device map from a live DeviceSet (non-owning pointer; caller
// keeps it alive). Devices whose properties cannot be determined are skipped.
VirtualCluster::VirtualCluster(const DeviceSet* device_set)
    : VirtualCluster(std::unordered_map<string, DeviceProperties>()) {
  device_set_ = device_set;
  for (const auto& device : device_set_->devices()) {
    DeviceProperties props = GetDeviceInfo(device->parsed_name());
    if (props.type() == "UNKNOWN") continue;  // unrecognized device kind
    auto attrs = device->attributes();
    // Use the device's actual memory limit rather than the probed default.
    props.set_memory_size(attrs.memory_limit());
    devices_[device->name()] = props;
  }
}
VirtualCluster::~VirtualCluster() {}
// A virtual cluster has no real resources to provision or initialize.
Status VirtualCluster::Provision() { return absl::OkStatus(); }
Status VirtualCluster::Initialize(const GrapplerItem& item) {
  return absl::OkStatus();
}
// Convenience overload: wraps the raw graph/feed/fetch into a GrapplerItem
// and delegates to the item-based Run().
Status VirtualCluster::Run(const GraphDef& graph,
                           const std::vector<std::pair<string, Tensor>>& feed,
                           const std::vector<string>& fetch,
                           RunMetadata* metadata) {
  GrapplerItem item;
  item.graph = graph;
  item.feed = feed;
  item.fetch = fetch;
  return Run(item, metadata);
}
// Simulates a run of `item` on the virtual devices: predicts per-node costs
// with the analytical estimator (results land in `metadata` if non-null) and
// then validates that the simulated peak memory fits each device's declared
// memory_size. Returns RESOURCE_EXHAUSTED if any device would run out.
Status VirtualCluster::Run(const GrapplerItem& item, RunMetadata* metadata) {
  // Drop stale results from a previous Run() before repopulating.
  if (metadata) {
    metadata->clear_step_stats();
    metadata->clear_cost_graph();
    metadata->clear_partition_graphs();
  }
  TF_RETURN_IF_ERROR(estimator_->Initialize(item));
  TF_RETURN_IF_ERROR(
      estimator_->PredictCosts(item.graph, metadata, nullptr));
  // Check the simulated peak memory usage against each known device's limit.
  const std::unordered_map<string, DeviceProperties>& devices = GetDevices();
  std::unordered_map<string, int64_t> peak_mem_usage =
      estimator_->GetScheduler()->GetPeakMemoryUsage();
  for (const auto& mem_usage : peak_mem_usage) {
    const string& device_name = mem_usage.first;
    auto it = devices.find(device_name);
    if (it == devices.end()) {
      // Scheduler reported a device we have no properties for; skip it.
      continue;
    }
    const DeviceProperties& dev = it->second;
    if (dev.memory_size() <= 0) {
      // Unknown/unspecified capacity — nothing to validate against.
      continue;
    }
    int64_t peak_mem = mem_usage.second;
    if (peak_mem >= dev.memory_size()) {
      // Fixed: previous message concatenated " to run ", " but ..." and
      // produced a double space ("to run  but").
      return errors::ResourceExhausted(
          "Graph requires ", peak_mem, " bytes of memory on device ",
          device_name, " to run but device only has ", dev.memory_size(),
          " available.");
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
// Fixture: builds a VirtualCluster with a single synthetic 1 MiB CPU device
// and provisions it before each test.
class VirtualClusterTest : public ::testing::Test {
 public:
  void SetUp() override {
    // Synthetic CPU with fixed frequency/cores/caches so predicted costs are
    // deterministic across runs.
    DeviceProperties cpu_device;
    cpu_device.set_type("CPU");
    cpu_device.set_frequency(1000);
    cpu_device.set_num_cores(4);
    cpu_device.set_bandwidth(32);
    cpu_device.set_l1_cache_size(32 * 1024);
    cpu_device.set_l2_cache_size(256 * 1024);
    cpu_device.set_l3_cache_size(4 * 1024 * 1024);
    cpu_device.set_memory_size(1024 * 1024);  // 1 MiB — small enough for OOM tests
    std::unordered_map<string, DeviceProperties> devices;
    devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
    cluster_ = std::make_unique<VirtualCluster>(devices);
    TF_CHECK_OK(cluster_->Provision());
  }
  void TearDown() override {
    TF_CHECK_OK(cluster_->Shutdown());
    cluster_.reset();
  }
 protected:
  std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(VirtualClusterTest, ClusterType) {
  CHECK_EQ("virtual", cluster_->type());
}
// Runs a small generated graph through the virtual cluster and pins the
// predicted cost-graph contents: output sizes/shapes and per-node compute
// cost, plus the per-device step stats.
TEST_F(VirtualClusterTest, CostModel) {
  TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
                                          cluster_->GetDeviceNames());
  GrapplerItem item;
  CHECK(fake_input.NextItem(&item));
  TF_CHECK_OK(cluster_->Initialize(item));
  RunMetadata metadata;
  TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
  EXPECT_LE(4, metadata.cost_graph().node_size());
  for (const auto& node : metadata.cost_graph().node()) {
    // Generated constants aren't part of the cost expectations.
    if (node.name().find("Const/Const") != string::npos) {
      continue;
    }
    EXPECT_EQ(1, node.output_info_size());
    EXPECT_EQ(40, node.output_info(0).size());  // 10x1 floats = 40 bytes
    const TensorShapeProto& shape = node.output_info(0).shape();
    EXPECT_EQ(2, shape.dim_size());
    EXPECT_EQ(10, shape.dim(0).size());
    EXPECT_EQ(1, shape.dim(1).size());
    if (node.name() == "x") {
      EXPECT_EQ(1500, node.compute_cost());
    } else {
      EXPECT_EQ(2500, node.compute_cost());
    }
  }
  for (const auto& dev_stat : metadata.step_stats().dev_stats()) {
    EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0", dev_stat.device());
    for (const auto& node : dev_stat.node_stats()) {
      if (node.node_name() == "AddN") {
        EXPECT_EQ(2500, node.op_end_rel_micros());
      }
    }
  }
}
// A 1024x1024 float variable (4 MiB) exceeds the fixture's 1 MiB device, so
// the simulated run must fail with RESOURCE_EXHAUSTED.
TEST_F(VirtualClusterTest, OutOfMemory) {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  auto zero = ops::Variable(root.WithOpName("zero"), {1024, 1024}, DT_FLOAT);
  auto identity = ops::Identity(root.WithOpName("i"), zero);
  auto identity2 = ops::Identity(root.WithOpName("i2"), identity);
  GrapplerItem item;
  TF_CHECK_OK(root.ToGraphDef(&item.graph));
  item.fetch.push_back("i2");
  TF_CHECK_OK(cluster_->Initialize(item));
  Status s = cluster_->Run(item.graph, item.feed, item.fetch, nullptr);
  EXPECT_EQ(error::RESOURCE_EXHAUSTED, s.code());
}
}
}
} | Status VirtualCluster::Provision() { return absl::OkStatus(); } | TEST_F(VirtualClusterTest, ClusterType) {
CHECK_EQ("virtual", cluster_->type());
}
TEST_F(VirtualClusterTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name().find("Const/Const") != string::npos) {
continue;
}
EXPECT_EQ(1, node.output_info_size());
EXPECT_EQ(40, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
if (node.name() == "x") {
EXPECT_EQ(1500, node.compute_cost());
} else {
EXPECT_EQ(2500, node.compute_cost());
}
}
for (const auto& dev_stat : metadata.step_stats().dev_stats()) {
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0", dev_stat.device());
for (const auto& node : dev_stat.node_stats()) {
if (node.node_name() == "AddN") {
EXPECT_EQ(2500, node.op_end_rel_micros());
}
}
}
}
TEST_F(VirtualClusterTest, OutOfMemory) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Variable(root.WithOpName("zero"), {1024, 1024}, DT_FLOAT);
auto identity = ops::Identity(root.WithOpName("i"), zero);
auto identity2 = ops::Identity(root.WithOpName("i2"), identity);
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("i2");
TF_CHECK_OK(cluster_->Initialize(item));
Status s = cluster_->Run(item.graph, item.feed, item.fetch, nullptr);
EXPECT_EQ(error::RESOURCE_EXHAUSTED, s.code());
} |
#include "tensorflow/core/common_runtime/request_cost.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace tensorflow {
// Accumulates the given (cost-type, duration) pairs into the running cost
// map under the lock. Repeated cost types add up across calls.
void RequestCost::RecordCost(
    const std::vector<std::pair<absl::string_view, absl::Duration>>& costs) {
  absl::MutexLock lock(&mutex_);
  for (const auto& [cost_type, duration] : costs) {
    cost_map_[cost_type] += duration;
  }
}
// Returns a snapshot (copy) of the accumulated costs, taken under the lock.
absl::flat_hash_map<std::string, absl::Duration> RequestCost::GetCosts() const {
  absl::MutexLock lock(&mutex_);
  return cost_map_;
}
// Stores the given (metric-name, value) pairs under the lock. Unlike
// RecordCost, a repeated name overwrites the previous value.
void RequestCost::RecordMetrics(
    const std::vector<std::pair<absl::string_view, double>>& metrics) {
  absl::MutexLock lock(&mutex_);
  for (const auto& [name, value] : metrics) {
    metric_map_[name] = value;
  }
}
// Returns a snapshot (copy) of the recorded metrics, taken under the lock.
absl::flat_hash_map<std::string, double> RequestCost::GetMetrics() const {
  absl::MutexLock lock(&mutex_);
  return metric_map_;
}
// Appends one batch's metrics; entries are kept in recording order.
void RequestCost::RecordBatchMetrics(const BatchMetrics& batch_metrics) {
  absl::MutexLock lock(&mutex_);
  batch_metrics_.push_back(batch_metrics);
}
// Returns a snapshot (copy) of all recorded batch metrics.
std::vector<RequestCost::BatchMetrics> RequestCost::GetBatchMetrics() const {
  absl::MutexLock lock(&mutex_);
  return batch_metrics_;
}
} | #include "tensorflow/core/common_runtime/request_cost.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Costs recorded under the same key accumulate across RecordCost calls.
TEST(RequestCostTest, RecordCost) {
  RequestCost request_cost;
  request_cost.RecordCost(
      {{"tpu_v1", absl::Milliseconds(1)}, {"tpu_v2", absl::Milliseconds(2)}});
  request_cost.RecordCost({{"tpu_v1", absl::Milliseconds(10)},
                           {"tpu_v2", absl::Milliseconds(20)},
                           {"cpu_v1", absl::Milliseconds(30)},
                           {"cpu_v2", absl::Milliseconds(40)}});
  EXPECT_THAT(request_cost.GetCosts(),
              UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
                                   Pair("tpu_v2", absl::Milliseconds(22)),
                                   Pair("cpu_v1", absl::Milliseconds(30)),
                                   Pair("cpu_v2", absl::Milliseconds(40))));
  request_cost.RecordCost(
      {{"cpu_v1", absl::Milliseconds(3)}, {"cpu_v2", absl::Milliseconds(4)}});
  EXPECT_THAT(request_cost.GetCosts(),
              UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
                                   Pair("tpu_v2", absl::Milliseconds(22)),
                                   Pair("cpu_v1", absl::Milliseconds(33)),
                                   Pair("cpu_v2", absl::Milliseconds(44))));
}
// Metrics recorded under the same key overwrite (not accumulate) the value.
TEST(RequestCostTest, RecordMetrics) {
  RequestCost request_cost;
  request_cost.RecordMetrics({{"metric_v1", 1}, {"metric_v2", 3.14}});
  EXPECT_THAT(
      request_cost.GetMetrics(),
      UnorderedElementsAre(Pair("metric_v1", 1), Pair("metric_v2", 3.14)));
  request_cost.RecordMetrics({{"metric_v1", 11},
                              {"metric_v2", 3.14159},
                              {"other_metric_v1", 3},
                              {"other_metric_v2", 4}});
  EXPECT_THAT(request_cost.GetMetrics(),
              UnorderedElementsAre(
                  Pair("metric_v1", 11), Pair("metric_v2", 3.14159),
                  Pair("other_metric_v1", 3), Pair("other_metric_v2", 4)));
}
// Batch metrics are appended in recording order and returned verbatim.
TEST(RequestCostTest, RecordBatchMetrics) {
  RequestCost request_cost;
  request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
      8,
      8,
      0,
      {{"gcu", absl::Milliseconds(80)}, {"tpu", absl::Milliseconds(160)}}});
  request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
      4,
      2,
      1,
      {{"gcu", absl::Milliseconds(40)}, {"tpu", absl::Milliseconds(80)}}});
  EXPECT_THAT(
      request_cost.GetBatchMetrics(),
      ElementsAre(
          FieldsAre(8, 8, 0,
                    UnorderedElementsAre(Pair("gcu", absl::Milliseconds(80)),
                                         Pair("tpu", absl::Milliseconds(160)))),
          FieldsAre(
              4, 2, 1,
              UnorderedElementsAre(Pair("gcu", absl::Milliseconds(40)),
                                   Pair("tpu", absl::Milliseconds(80))))));
}
}
} | std::vector<RequestCost::BatchMetrics> RequestCost::GetBatchMetrics() const {
absl::MutexLock lock(&mutex_);
return batch_metrics_;
} | TEST(RequestCostTest, RecordBatchMetrics) {
RequestCost request_cost;
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
8,
8,
0,
{{"gcu", absl::Milliseconds(80)}, {"tpu", absl::Milliseconds(160)}}});
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
4,
2,
1,
{{"gcu", absl::Milliseconds(40)}, {"tpu", absl::Milliseconds(80)}}});
EXPECT_THAT(
request_cost.GetBatchMetrics(),
ElementsAre(
FieldsAre(8, 8, 0,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(80)),
Pair("tpu", absl::Milliseconds(160)))),
FieldsAre(
4, 2, 1,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(40)),
Pair("tpu", absl::Milliseconds(80))))));
} |
#include <cmath>
#include <cstddef>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel::common_internal {
namespace {
// Formats a double the way CEL debug output expects: finite integral values
// gain a trailing ".0" when StrCat didn't emit a decimal point; NaN and the
// infinities use fixed spellings.
std::string DoubleDebugString(double value) {
  if (std::isfinite(value)) {
    std::string stringified = absl::StrCat(value);
    // Only integral values may stringify without '.', e.g. "1" -> "1.0".
    // (Previously this path duplicated the StrCat call and carried a dead
    // empty `else {}` branch; behavior is unchanged.)
    if (std::floor(value) == value && !absl::StrContains(stringified, '.')) {
      absl::StrAppend(&stringified, ".0");
    }
    return stringified;
  }
  if (std::isnan(value)) {
    return "nan";
  }
  // Only NaN and +/-inf reach here; signbit distinguishes the infinities.
  return std::signbit(value) ? "-infinity" : "+infinity";
}
}
// Debug rendering delegates to DoubleDebugString above.
std::string DoubleValueBase::DebugString() const {
  return DoubleDebugString(NativeValue());
}
// Size of the google.protobuf.DoubleValue wire encoding for this value.
absl::StatusOr<size_t> DoubleValueBase::GetSerializedSize(
    AnyToJsonConverter&) const {
  return internal::SerializedDoubleValueSize(NativeValue());
}
// Appends the wire encoding to `value`.
absl::Status DoubleValueBase::SerializeTo(AnyToJsonConverter&,
                                          absl::Cord& value) const {
  return internal::SerializeDoubleValue(NativeValue(), value);
}
// Convenience wrapper around SerializeTo that returns a fresh Cord.
absl::StatusOr<absl::Cord> DoubleValueBase::Serialize(
    AnyToJsonConverter& value_manager) const {
  absl::Cord value;
  CEL_RETURN_IF_ERROR(SerializeTo(value_manager, value));
  return value;
}
// Type URL is always google.protobuf.DoubleValue under the caller's prefix.
absl::StatusOr<std::string> DoubleValueBase::GetTypeUrl(
    absl::string_view prefix) const {
  return MakeTypeUrlWithPrefix(prefix, "google.protobuf.DoubleValue");
}
// Packs the serialized value and type URL into a google.protobuf.Any.
absl::StatusOr<Any> DoubleValueBase::ConvertToAny(
    AnyToJsonConverter& value_manager, absl::string_view prefix) const {
  CEL_ASSIGN_OR_RETURN(auto value, Serialize(value_manager));
  CEL_ASSIGN_OR_RETURN(auto type_url, GetTypeUrl(prefix));
  return MakeAny(std::move(type_url), std::move(value));
}
// JSON representation of a double is the number itself.
absl::StatusOr<Json> DoubleValueBase::ConvertToJson(AnyToJsonConverter&) const {
  return NativeValue();
}
// CEL equality: doubles compare numerically against double, int, and uint
// operands (via internal::Number for the cross-type cases); any other operand
// type compares unequal rather than erroring.
absl::Status DoubleValueBase::Equal(ValueManager&, ValueView other,
                                    Value& result) const {
  if (auto other_value = As<DoubleValueView>(other); other_value.has_value()) {
    result = BoolValueView{NativeValue() == other_value->NativeValue()};
    return absl::OkStatus();
  }
  if (auto other_value = As<IntValueView>(other); other_value.has_value()) {
    result =
        BoolValueView{internal::Number::FromDouble(NativeValue()) ==
                      internal::Number::FromInt64(other_value->NativeValue())};
    return absl::OkStatus();
  }
  if (auto other_value = As<UintValueView>(other); other_value.has_value()) {
    result =
        BoolValueView{internal::Number::FromDouble(NativeValue()) ==
                      internal::Number::FromUint64(other_value->NativeValue())};
    return absl::OkStatus();
  }
  // Non-numeric operand: not equal.
  result = BoolValueView{false};
  return absl::OkStatus();
}
// StatusOr-returning wrapper over the out-parameter overload above.
absl::StatusOr<Value> DoubleValueBase::Equal(ValueManager& value_manager,
                                             ValueView other) const {
  Value result;
  CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
  return result;
}
} | #include <cmath>
#include <sstream>
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using cel::internal::IsOkAndHolds;
using DoubleValueTest = common_internal::ThreadCompatibleValueTest<>;
// kind() must report kKind both directly and after type erasure into Value.
TEST_P(DoubleValueTest, Kind) {
  EXPECT_EQ(DoubleValue(1.0).kind(), DoubleValue::kKind);
  EXPECT_EQ(Value(DoubleValue(1.0)).kind(), DoubleValue::kKind);
}
TEST_P(DoubleValueTest, DebugString) {
{
std::ostringstream out;
out << DoubleValue(0.0);
EXPECT_EQ(out.str(), "0.0");
}
{
std::ostringstream out;
out << DoubleValue(1.0);
EXPECT_EQ(out.str(), "1.0");
}
{
std::ostringstream out;
out << DoubleValue(1.1);
EXPECT_EQ(out.str(), "1.1");
}
{
std::ostringstream out;
out << DoubleValue(NAN);
EXPECT_EQ(out.str(), "nan");
}
{
std::ostringstream out;
out << DoubleValue(INFINITY);
EXPECT_EQ(out.str(), "+infinity");
}
{
std::ostringstream out;
out << DoubleValue(-INFINITY);
EXPECT_EQ(out.str(), "-infinity");
}
{
std::ostringstream out;
out << Value(DoubleValue(0.0));
EXPECT_EQ(out.str(), "0.0");
}
}
// A default (0.0) double serializes to zero bytes, matching proto3's
// omission of default-valued fields.
TEST_P(DoubleValueTest, GetSerializedSize) {
  EXPECT_THAT(DoubleValue().GetSerializedSize(value_manager()),
              IsOkAndHolds(0));
}
TEST_P(DoubleValueTest, ConvertToAny) {
EXPECT_THAT(DoubleValue().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.DoubleValue"),
absl::Cord())));
}
TEST_P(DoubleValueTest, ConvertToJson) {
EXPECT_THAT(DoubleValue(1.0).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(DoubleValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(DoubleValue(1.0)),
NativeTypeId::For<DoubleValue>());
EXPECT_EQ(NativeTypeId::Of(Value(DoubleValue(1.0))),
NativeTypeId::For<DoubleValue>());
}
TEST_P(DoubleValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<DoubleValue>(DoubleValue(1.0)));
EXPECT_TRUE(InstanceOf<DoubleValue>(Value(DoubleValue(1.0))));
}
TEST_P(DoubleValueTest, Cast) {
EXPECT_THAT(Cast<DoubleValue>(DoubleValue(1.0)), An<DoubleValue>());
EXPECT_THAT(Cast<DoubleValue>(Value(DoubleValue(1.0))), An<DoubleValue>());
}
TEST_P(DoubleValueTest, As) {
EXPECT_THAT(As<DoubleValue>(DoubleValue(1.0)), Ne(absl::nullopt));
EXPECT_THAT(As<DoubleValue>(Value(DoubleValue(1.0))), Ne(absl::nullopt));
}
// operator!= with raw doubles on either side and with another DoubleValue.
TEST_P(DoubleValueTest, Equality) {
  EXPECT_NE(DoubleValue(0.0), 1.0);
  EXPECT_NE(1.0, DoubleValue(0.0));
  EXPECT_NE(DoubleValue(0.0), DoubleValue(1.0));
}
INSTANTIATE_TEST_SUITE_P(
DoubleValueTest, DoubleValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
DoubleValueTest::ToString);
using DoubleValueViewTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(DoubleValueViewTest, Kind) {
EXPECT_EQ(DoubleValueView(1.0).kind(), DoubleValueView::kKind);
EXPECT_EQ(ValueView(DoubleValueView(1.0)).kind(), DoubleValueView::kKind);
}
TEST_P(DoubleValueViewTest, DebugString) {
{
std::ostringstream out;
out << DoubleValueView(0.0);
EXPECT_EQ(out.str(), "0.0");
}
{
std::ostringstream out;
out << DoubleValueView(1.0);
EXPECT_EQ(out.str(), "1.0");
}
{
std::ostringstream out;
out << DoubleValueView(1.1);
EXPECT_EQ(out.str(), "1.1");
}
{
std::ostringstream out;
out << DoubleValueView(NAN);
EXPECT_EQ(out.str(), "nan");
}
{
std::ostringstream out;
out << DoubleValueView(INFINITY);
EXPECT_EQ(out.str(), "+infinity");
}
{
std::ostringstream out;
out << DoubleValueView(-INFINITY);
EXPECT_EQ(out.str(), "-infinity");
}
{
std::ostringstream out;
out << ValueView(DoubleValueView(0.0));
EXPECT_EQ(out.str(), "0.0");
}
}
// NOTE(review): these two View-suite tests exercise DoubleValue, not
// DoubleValueView — presumably copy-paste from the owning-value suite;
// confirm whether DoubleValueView() was intended.
TEST_P(DoubleValueViewTest, GetSerializedSize) {
  EXPECT_THAT(DoubleValue().GetSerializedSize(value_manager()),
              IsOkAndHolds(0));
}
TEST_P(DoubleValueViewTest, ConvertToAny) {
  EXPECT_THAT(DoubleValue().ConvertToAny(value_manager()),
              IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.DoubleValue"),
                                   absl::Cord())));
}
TEST_P(DoubleValueViewTest, ConvertToJson) {
EXPECT_THAT(DoubleValueView(1.0).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(DoubleValueViewTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(DoubleValueView(1.0)),
NativeTypeId::For<DoubleValueView>());
EXPECT_EQ(NativeTypeId::Of(ValueView(DoubleValueView(1.0))),
NativeTypeId::For<DoubleValueView>());
}
TEST_P(DoubleValueViewTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<DoubleValueView>(DoubleValueView(1.0)));
EXPECT_TRUE(InstanceOf<DoubleValueView>(ValueView(DoubleValueView(1.0))));
}
TEST_P(DoubleValueViewTest, Cast) {
EXPECT_THAT(Cast<DoubleValueView>(DoubleValueView(1.0)),
An<DoubleValueView>());
EXPECT_THAT(Cast<DoubleValueView>(ValueView(DoubleValueView(1.0))),
An<DoubleValueView>());
}
TEST_P(DoubleValueViewTest, As) {
EXPECT_THAT(As<DoubleValueView>(DoubleValueView(1.0)), Ne(absl::nullopt));
EXPECT_THAT(As<DoubleValueView>(ValueView(DoubleValueView(1.0))),
Ne(absl::nullopt));
}
TEST_P(DoubleValueViewTest, Equality) {
EXPECT_NE(DoubleValueView(DoubleValue(0.0)), 1.0);
EXPECT_NE(1.0, DoubleValueView(0.0));
EXPECT_NE(DoubleValueView(0.0), DoubleValueView(1.0));
EXPECT_NE(DoubleValueView(0.0), DoubleValue(1.0));
EXPECT_NE(DoubleValue(1.0), DoubleValueView(0.0));
}
INSTANTIATE_TEST_SUITE_P(
DoubleValueViewTest, DoubleValueViewTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
DoubleValueViewTest::ToString);
}
} | absl::StatusOr<size_t> DoubleValueBase::GetSerializedSize(
AnyToJsonConverter&) const {
return internal::SerializedDoubleValueSize(NativeValue());
} | TEST_P(DoubleValueTest, GetSerializedSize) {
EXPECT_THAT(DoubleValue().GetSerializedSize(value_manager()),
IsOkAndHolds(0));
}
TEST_P(DoubleValueViewTest, GetSerializedSize) {
EXPECT_THAT(DoubleValue().GetSerializedSize(value_manager()),
IsOkAndHolds(0));
} |
#ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_STACK_TRACE_H_
#define QUICHE_COMMON_PLATFORM_API_QUICHE_STACK_TRACE_H_
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "quiche_platform_impl/quiche_stack_trace_impl.h"
namespace quiche {
// Returns the raw return addresses of the current call stack, as provided by
// the platform implementation.
inline std::vector<void*> CurrentStackTrace() {
  return CurrentStackTraceImpl();
}
// Renders a previously captured stack trace into a human-readable string.
inline std::string SymbolizeStackTrace(absl::Span<void* const> stacktrace) {
  return SymbolizeStackTraceImpl(stacktrace);
}
// One-shot capture + symbolize of the current stack.
inline std::string QuicheStackTrace() { return QuicheStackTraceImpl(); }
// Whether stack-trace tests are expected to pass on this platform/build
// (e.g. symbolization may be unavailable).
inline bool QuicheShouldRunStackTraceTest() {
  return QuicheShouldRunStackTraceTestImpl();
}
}
#endif | #include "quiche/common/platform/api/quiche_stack_trace.h"
#include <cstdint>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
// Gate for the tests below: without ABSL_ATTRIBUTE_NOINLINE support the
// designated frames may be inlined away, so the symbol-name checks would be
// meaningless; the platform can also opt out via the impl hook.
bool ShouldRunTest() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
  return QuicheShouldRunStackTraceTest();
#else
  return false;
#endif
}
// Captures a stack trace from inside a deliberately non-inlined,
// non-tail-called frame so its own name must appear in the trace.
ABSL_ATTRIBUTE_NOINLINE std::string QuicheDesignatedStackTraceTestFunction() {
  std::string result = QuicheStackTrace();
  // Prevent tail-call optimization from eliding this frame.
  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
  return result;
}
// Same as above, but exercises the two-step capture-then-symbolize API.
ABSL_ATTRIBUTE_NOINLINE std::string
QuicheDesignatedTwoStepStackTraceTestFunction() {
  std::string result = SymbolizeStackTrace(CurrentStackTrace());
  // Prevent tail-call optimization from eliding this frame.
  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
  return result;
}
// The one-shot API must symbolize the designated frame's own name.
TEST(QuicheStackTraceTest, GetStackTrace) {
  if (!ShouldRunTest()) {
    return;
  }
  std::string stacktrace = QuicheDesignatedStackTraceTestFunction();
  EXPECT_THAT(stacktrace,
              testing::HasSubstr("QuicheDesignatedStackTraceTestFunction"));
}
// The capture-then-symbolize path must produce the same kind of output.
TEST(QuicheStackTraceTest, GetStackTraceInTwoSteps) {
  if (!ShouldRunTest()) {
    return;
  }
  std::string stacktrace = QuicheDesignatedTwoStepStackTraceTestFunction();
  EXPECT_THAT(stacktrace, testing::HasSubstr(
                              "QuicheDesignatedTwoStepStackTraceTestFunction"));
}
}
}
} | inline std::string SymbolizeStackTrace(absl::Span<void* const> stacktrace) {
return SymbolizeStackTraceImpl(stacktrace);
} | TEST(QuicheStackTraceTest, GetStackTraceInTwoSteps) {
if (!ShouldRunTest()) {
return;
}
std::string stacktrace = QuicheDesignatedTwoStepStackTraceTestFunction();
EXPECT_THAT(stacktrace, testing::HasSubstr(
"QuicheDesignatedTwoStepStackTraceTestFunction"));
} |
#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <cstring>
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
// Rounds `x` up to the nearest multiple of `align` (ceiling division, then
// scale back). Assumes align > 0 and that x + align - 1 does not overflow.
inline constexpr size_t AlignUp(size_t x, size_t align) {
  return ((x + align - 1) / align) * align;
}
// A sequence lock ("seqlock"): a single writer publishes a buffer of atomic
// 64-bit words while any number of readers copy it optimistically and detect
// torn reads by re-checking the sequence counter. Even counter => stable;
// odd => write in progress; kUninitialized (-1) => not yet published.
// The relaxed per-word copies are made safe by the fences/ordered counter
// accesses in TryRead/Write.
class SequenceLock {
 public:
  constexpr SequenceLock() : lock_(kUninitialized) {}
  // Transitions from "uninitialized" to sequence 0, after which readers may
  // succeed. Release ordering publishes any prior writes to the buffer.
  void MarkInitialized() {
    assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
    lock_.store(0, std::memory_order_release);
  }
  // Copies `size` bytes out of `src` into `dst`. Returns false if a write
  // was in progress (odd sequence, or still kUninitialized) or raced with
  // this read; in that case `dst` may contain torn data and must be ignored.
  bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
    // Acquire: the copy below cannot be reordered before this load.
    int64_t seq_before = lock_.load(std::memory_order_acquire);
    if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
    RelaxedCopyFromAtomic(dst, src, size);
    // Fence + relaxed re-load: the copy cannot be reordered after the
    // re-check. An unchanged sequence proves no writer ran concurrently.
    std::atomic_thread_fence(std::memory_order_acquire);
    int64_t seq_after = lock_.load(std::memory_order_relaxed);
    return ABSL_PREDICT_TRUE(seq_before == seq_after);
  }
  // Single-writer publish: bump the counter to odd (write in progress),
  // copy the data, then bump to the next even value with release ordering
  // so readers that observe the new counter also observe the new data.
  void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
    int64_t orig_seq = lock_.load(std::memory_order_relaxed);
    assert((orig_seq & 1) == 0);
    lock_.store(orig_seq + 1, std::memory_order_relaxed);
    // Release fence: the copy below cannot be reordered before the odd store.
    std::atomic_thread_fence(std::memory_order_release);
    RelaxedCopyToAtomic(dst, src, size);
    lock_.store(orig_seq + 2, std::memory_order_release);
  }
  // Number of completed Write() calls (sequence / 2). Must not be called
  // while uninitialized or mid-write.
  int64_t ModificationCount() const {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized && (val & 1) == 0);
    return val / 2;
  }
  // Advances the modification count without publishing data. Single-writer
  // only, like Write().
  void IncrementModificationCount() {
    int64_t val = lock_.load(std::memory_order_relaxed);
    assert(val != kUninitialized);
    lock_.store(val + 2, std::memory_order_relaxed);
  }
 private:
  // Word-wise relaxed copy out of the atomic buffer; the final partial word
  // (if size is not a multiple of 8) is truncated via memcpy.
  static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
                                    size_t size) {
    char* dst_byte = static_cast<char*>(dst);
    while (size >= sizeof(uint64_t)) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, sizeof(word));
      dst_byte += sizeof(word);
      src++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = src->load(std::memory_order_relaxed);
      std::memcpy(dst_byte, &word, size);
    }
  }
  // Word-wise relaxed copy into the atomic buffer; a trailing partial word
  // is zero-padded before being stored.
  static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
                                  size_t size) {
    const char* src_byte = static_cast<const char*>(src);
    while (size >= sizeof(uint64_t)) {
      uint64_t word;
      std::memcpy(&word, src_byte, sizeof(word));
      dst->store(word, std::memory_order_relaxed);
      src_byte += sizeof(word);
      dst++;
      size -= sizeof(word);
    }
    if (size > 0) {
      uint64_t word = 0;
      std::memcpy(&word, src_byte, size);
      dst->store(word, std::memory_order_relaxed);
    }
  }
  static constexpr int64_t kUninitialized = -1;
  std::atomic<int64_t> lock_;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/flags/internal/sequence_lock.h"
#include <algorithm>
#include <atomic>
#include <thread>
#include <tuple>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/container/fixed_array.h"
#include "absl/time/clock.h"
namespace {
namespace flags = absl::flags_internal;
// Fixture parameterized over (buffer size in bytes, number of reader
// threads) so the stress test covers partial-word sizes and contention.
class ConcurrentSequenceLockTest
    : public testing::TestWithParam<std::tuple<int, int>> {
 public:
  ConcurrentSequenceLockTest()
      : buf_bytes_(std::get<0>(GetParam())),
        num_threads_(std::get<1>(GetParam())) {}
 protected:
  const int buf_bytes_;
  const int num_threads_;
};
// Stress test: one writer repeatedly publishes a buffer filled with a single
// byte value while many readers TryRead concurrently. A successful read must
// never observe a torn (mixed-byte) buffer.
TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) {
  const int buf_words =
      flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t);
  absl::FixedArray<std::atomic<uint64_t>> protected_buf(buf_words);
  for (auto& v : protected_buf) v = -1;
  flags::SequenceLock seq_lock;
  std::atomic<bool> stop{false};
  std::atomic<int64_t> bad_reads{0};
  std::atomic<int64_t> good_reads{0};
  std::atomic<int64_t> unsuccessful_reads{0};
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads_; i++) {
    threads.emplace_back([&]() {
      absl::FixedArray<char> local_buf(buf_bytes_);
      while (!stop.load(std::memory_order_relaxed)) {
        if (seq_lock.TryRead(local_buf.data(), protected_buf.data(),
                             buf_bytes_)) {
          // Every byte must equal the first: a mix means a torn read that
          // TryRead failed to detect.
          bool good = true;
          for (const auto& v : local_buf) {
            if (v != local_buf[0]) good = false;
          }
          if (good) {
            good_reads.fetch_add(1, std::memory_order_relaxed);
          } else {
            bad_reads.fetch_add(1, std::memory_order_relaxed);
          }
        } else {
          unsuccessful_reads.fetch_add(1, std::memory_order_relaxed);
        }
      }
    });
  }
  // Wait until every reader has observed the uninitialized state (each
  // failed read bumps the counter), then publish.
  while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) {
    absl::SleepFor(absl::Milliseconds(1));
  }
  seq_lock.MarkInitialized();
  // Bounded writer loop: up to 100 publishes or 5 seconds, whichever first.
  absl::Time deadline = absl::Now() + absl::Seconds(5);
  for (int i = 0; i < 100 && absl::Now() < deadline; i++) {
    absl::FixedArray<char> writer_buf(buf_bytes_);
    for (auto& v : writer_buf) v = i;
    seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_);
    absl::SleepFor(absl::Microseconds(10));
  }
  stop.store(true, std::memory_order_relaxed);
  for (auto& t : threads) t.join();
  ASSERT_GE(good_reads, 0);
  ASSERT_EQ(bad_reads, 0);
}
// Returns {low, low*scale, low*scale^2, ...} for every value strictly below
// `high`, with `high` itself always appended as the final element.
std::vector<int> MultiplicativeRange(int low, int high, int scale) {
  std::vector<int> values;
  int v = low;
  while (v < high) {
    values.push_back(v);
    v *= scale;
  }
  values.push_back(high);
  return values;
}
#ifndef ABSL_HAVE_THREAD_SANITIZER
const int kMaxThreads = absl::base_internal::NumCPUs();
#else
const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4);
#endif
// Buffer sizes worth stressing: each power of two up to 128, plus its
// immediate neighbors (to cover partial trailing words and off-by-one
// boundaries around word multiples).
std::vector<int> InterestingBufferSizes() {
  std::vector<int> sizes;
  for (const int size : MultiplicativeRange(1, 128, 2)) {
    sizes.push_back(size);
    // size - 1 would duplicate 0 for the first entry, so skip it there.
    if (size > 1) {
      sizes.push_back(size - 1);
    }
    sizes.push_back(size + 1);
  }
  return sizes;
}
INSTANTIATE_TEST_SUITE_P(
TestManyByteSizes, ConcurrentSequenceLockTest,
testing::Combine(
testing::ValuesIn(InterestingBufferSizes()),
testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2))));
class SequenceLockTest : public testing::TestWithParam<int> {};
// Single-threaded round trip: with no contention, TryRead must succeed and
// recover exactly the bytes written, for every size 1..127.
TEST_P(SequenceLockTest, SingleThreaded) {
  const int size = GetParam();
  absl::FixedArray<std::atomic<uint64_t>> protected_buf(
      flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t));
  flags::SequenceLock seq_lock;
  seq_lock.MarkInitialized();
  std::vector<char> src_buf(size, 'x');
  seq_lock.Write(protected_buf.data(), src_buf.data(), size);
  std::vector<char> dst_buf(size, '0');
  ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size));
  ASSERT_EQ(src_buf, dst_buf);
}
INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, SequenceLockTest,
testing::Range(1, 128));
} | void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
int64_t orig_seq = lock_.load(std::memory_order_relaxed);
assert((orig_seq & 1) == 0);
lock_.store(orig_seq + 1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_release);
RelaxedCopyToAtomic(dst, src, size);
lock_.store(orig_seq + 2, std::memory_order_release);
} | TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) {
const int buf_words =
flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t);
absl::FixedArray<std::atomic<uint64_t>> protected_buf(buf_words);
for (auto& v : protected_buf) v = -1;
flags::SequenceLock seq_lock;
std::atomic<bool> stop{false};
std::atomic<int64_t> bad_reads{0};
std::atomic<int64_t> good_reads{0};
std::atomic<int64_t> unsuccessful_reads{0};
std::vector<std::thread> threads;
for (int i = 0; i < num_threads_; i++) {
threads.emplace_back([&]() {
absl::FixedArray<char> local_buf(buf_bytes_);
while (!stop.load(std::memory_order_relaxed)) {
if (seq_lock.TryRead(local_buf.data(), protected_buf.data(),
buf_bytes_)) {
bool good = true;
for (const auto& v : local_buf) {
if (v != local_buf[0]) good = false;
}
if (good) {
good_reads.fetch_add(1, std::memory_order_relaxed);
} else {
bad_reads.fetch_add(1, std::memory_order_relaxed);
}
} else {
unsuccessful_reads.fetch_add(1, std::memory_order_relaxed);
}
}
});
}
while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) {
absl::SleepFor(absl::Milliseconds(1));
}
seq_lock.MarkInitialized();
absl::Time deadline = absl::Now() + absl::Seconds(5);
for (int i = 0; i < 100 && absl::Now() < deadline; i++) {
absl::FixedArray<char> writer_buf(buf_bytes_);
for (auto& v : writer_buf) v = i;
seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_);
absl::SleepFor(absl::Microseconds(10));
}
stop.store(true, std::memory_order_relaxed);
for (auto& t : threads) t.join();
ASSERT_GE(good_reads, 0);
ASSERT_EQ(bad_reads, 0);
}
TEST_P(SequenceLockTest, SingleThreaded) {
const int size = GetParam();
absl::FixedArray<std::atomic<uint64_t>> protected_buf(
flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t));
flags::SequenceLock seq_lock;
seq_lock.MarkInitialized();
std::vector<char> src_buf(size, 'x');
seq_lock.Write(protected_buf.data(), src_buf.data(), size);
std::vector<char> dst_buf(size, '0');
ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size));
ASSERT_EQ(src_buf, dst_buf);
} |
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace scatter_nd {
constexpr int kIndices = 0;
constexpr int kUpdates = 1;
constexpr int kShape = 2;
constexpr int kOutputTensor = 0;
// Resizes `output` to the dimensions listed in the rank-1 `shape` tensor
// (element type IndicesT). Ownership of the new shape array passes to
// ResizeTensor.
template <typename IndicesT>
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* shape,
                                TfLiteTensor* output) {
  const int shape_rank = SizeOfDimension(shape, 0);
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape_rank);
  const auto* shape_data = GetTensorData<IndicesT>(shape);
  for (int i = 0; i < shape_rank; i++) {
    output_shape->data[i] = shape_data[i];
  }
  return context->ResizeTensor(context, output, output_shape);
}
// Validates the scatter_nd shape contract:
//   indices: [d0..dk, ix], updates: [d0..dk, s_ix..s_n-1], shape: [s0..s_n-1]
// i.e. the leading dims of indices and updates match, and the trailing dims
// of updates match the suffix of `shape_data` past the index depth `ix`.
template <typename IndicesT>
TfLiteStatus CheckShapes(TfLiteContext* context, const RuntimeShape& indices,
                         const RuntimeShape& updates,
                         const RuntimeShape& shape_shape,
                         const IndicesT* shape_data) {
  TF_LITE_ENSURE(context, (indices.DimensionsCount() >= 1) &&
                              (updates.DimensionsCount() >= 1) &&
                              (shape_shape.DimensionsCount() == 1));
  // All dims of indices except the last must match updates one-for-one.
  const int outer_dims = indices.DimensionsCount() - 1;
  for (int i = 0; i < outer_dims; ++i) {
    TF_LITE_ENSURE_EQ(context, indices.Dims(i), updates.Dims(i));
  }
  // ix = index depth: how many leading output dims each index addresses.
  const int ix = indices.Dims(outer_dims);
  TF_LITE_ENSURE_EQ(context, updates.DimensionsCount() - outer_dims,
                    shape_shape.Dims(0) - ix);
  // Each trailing update dim must equal the corresponding output dim.
  for (int i = 0; i + outer_dims < updates.DimensionsCount(); ++i) {
    TF_LITE_ENSURE_EQ(context, updates.Dims(i + outer_dims),
                      shape_data[ix + i]);
  }
  return kTfLiteOk;
}
// Prepare: validates input arity and dtypes; if the shape tensor is constant
// the output is shape-checked and resized now, otherwise it is marked
// dynamic and resized at Eval time.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
  const TfLiteTensor* updates;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
  const TfLiteTensor* shape;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
  // Supported update dtypes; anything else is rejected up front.
  switch (updates->type) {
    case kTfLiteFloat32:
    case kTfLiteUInt8:
    case kTfLiteBool:
    case kTfLiteInt8:
    case kTfLiteInt64:
    case kTfLiteInt32:
      break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Updates of type '%s' are not supported by scatter_nd.",
          TfLiteTypeGetName(updates->type));
      return kTfLiteError;
  }
  if (indices->type != shape->type) {
    TF_LITE_KERNEL_LOG(context, "Indices and shape must have the same type.");
    return kTfLiteError;
  }
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Output inherits the updates' element type.
  output->type = updates->type;
  if (IsConstantOrPersistentTensor(shape)) {
    // Shape known at prepare time: validate and allocate the output now.
    switch (indices->type) {
      case kTfLiteInt32:
        TF_LITE_ENSURE_OK(
            context,
            CheckShapes<int32_t>(context, GetTensorShape(indices),
                                 GetTensorShape(updates), GetTensorShape(shape),
                                 GetTensorData<int32_t>(shape)));
        return ResizeOutputTensor<int32_t>(context, shape, output);
      default:
        TF_LITE_KERNEL_LOG(
            context, "Indices of type '%s' are not supported by scatter_nd.",
            TfLiteTypeGetName(indices->type));
        return kTfLiteError;
    }
  } else {
    // Shape only known at runtime: defer checks/resize to Eval.
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }
}
// Thin typed wrapper over the reference scatter_nd implementation; returns
// its status (non-OK on out-of-bounds indices).
template <typename IndicesT, typename UpdatesT>
TfLiteStatus ScatterNd(const TfLiteTensor* indices, const TfLiteTensor* updates,
                       TfLiteTensor* output) {
  return reference_ops::ScatterNd(
      GetTensorShape(indices), GetTensorData<IndicesT>(indices),
      GetTensorShape(updates), GetTensorData<UpdatesT>(updates),
      GetTensorShape(output), GetTensorData<UpdatesT>(output));
}
// Runs the scatter for a fixed index type: re-validates shapes and resizes
// first if the output was left dynamic at Prepare, then dispatches on the
// updates' dtype.
template <typename IndicesT>
TfLiteStatus EvalScatterNd(TfLiteContext* context, const TfLiteTensor* indices,
                           const TfLiteTensor* updates,
                           const TfLiteTensor* shape, TfLiteTensor* output) {
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(
        context, CheckShapes<IndicesT>(
                     context, GetTensorShape(indices), GetTensorShape(updates),
                     GetTensorShape(shape), GetTensorData<IndicesT>(shape)));
    TF_LITE_ENSURE_OK(context,
                      ResizeOutputTensor<IndicesT>(context, shape, output));
  }
  TfLiteStatus status = kTfLiteError;
  switch (updates->type) {
    case kTfLiteFloat32:
      status = ScatterNd<IndicesT, float>(indices, updates, output);
      break;
    case kTfLiteUInt8:
      status = ScatterNd<IndicesT, uint8_t>(indices, updates, output);
      break;
    case kTfLiteBool:
      status = ScatterNd<IndicesT, bool>(indices, updates, output);
      break;
    case kTfLiteInt8:
      status = ScatterNd<IndicesT, int8_t>(indices, updates, output);
      break;
    case kTfLiteInt32:
      status = ScatterNd<IndicesT, int32_t>(indices, updates, output);
      break;
    case kTfLiteInt64:
      status = ScatterNd<IndicesT, int64_t>(indices, updates, output);
      break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Updates of type '%s' are not supported by scatter_nd.",
          TfLiteTypeGetName(updates->type));
      return kTfLiteError;
  }
  // The reference op only fails on out-of-range indices.
  if (status != kTfLiteOk) {
    TF_LITE_KERNEL_LOG(context, "scatter_nd index out of bounds");
  }
  return status;
}
// Eval entry point: fetches the three inputs and the output, then dispatches
// on the index dtype (only int32 indices are supported).
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
  const TfLiteTensor* updates;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
  const TfLiteTensor* shape;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  if (indices->type == kTfLiteInt32) {
    return EvalScatterNd<int32_t>(context, indices, updates, shape, output);
  }
  TF_LITE_KERNEL_LOG(context,
                     "Indices of type '%s' are not supported by scatter_nd.",
                     TfLiteTypeGetName(indices->type));
  return kTfLiteError;
}
}
// Registration hook: no init/free needed, only Prepare and Eval.
TfLiteRegistration* Register_SCATTER_ND() {
  static TfLiteRegistration r = { nullptr, nullptr,
                                 scatter_nd::Prepare, scatter_nd::Eval};
  return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
// Test harness around a single SCATTER_ND op: three inputs (indices,
// updates, shape) and one output whose dtype follows the updates.
class ScatterNdOpModel : public SingleOpModel {
 public:
  ScatterNdOpModel(const TensorData& indices, const TensorData& updates,
                   const TensorData& shape) {
    indices_ = AddInput(indices);
    updates_ = AddInput(updates);
    shape_ = AddInput(shape);
    output_ = AddOutput(updates.type);
    SetBuiltinOp(BuiltinOperator_SCATTER_ND, BuiltinOptions_ScatterNdOptions,
                 CreateScatterNdOptions(builder_).Union());
    BuildInterpreter(
        {GetShape(indices_), GetShape(updates_), GetShape(shape_)});
  }
  // Fills the indices input tensor.
  template <typename T>
  void SetIndices(std::initializer_list<T> data) {
    PopulateTensor<T>(indices_, data);
  }
  // Fills the updates input tensor.
  template <typename T>
  void SetUpdates(std::initializer_list<T> data) {
    PopulateTensor<T>(updates_, data);
  }
  // Fills the output-shape input tensor.
  template <typename T>
  void SetShape(std::initializer_list<T> data) {
    PopulateTensor<T>(shape_, data);
  }
  // Reads back the flattened output values after Invoke().
  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 protected:
  // Tensor indices within the interpreter.
  int indices_;
  int updates_;
  int shape_;
  int output_;
};
// Scalar scatter: four single-element indices into a length-8 vector; all
// untouched slots stay zero.
TEST(ScatterNdOpTest, ScatterElementIntoVector) {
  ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4}},
                     {TensorType_INT32, {1}});
  m.SetIndices<int32_t>({4, 3, 1, 7});
  m.SetUpdates<float>({9, 10, 11, 12});
  m.SetShape<int32_t>({8});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
  EXPECT_THAT(m.GetOutput<float>(),
              ElementsAreArray({0, 11, 0, 10, 9, 0, 0, 12}));
}
TEST(ScatterNdOpTest, ScatterMatrixIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 1}},
{TensorType_FLOAT32, {2, 4, 4}}, {TensorType_INT32, {3}});
m.SetIndices<int32_t>({0, 2});
m.SetUpdates<float>({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8});
m.SetShape<int32_t>({4, 4, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 4, 4}));
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoMatrix) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4, 4}},
{TensorType_INT32, {2}});
m.SetIndices<int32_t>({ 9, 8, 0, 1});
m.SetUpdates<float>({ 1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16});
m.SetShape<int32_t>({10, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({10, 4}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 9, 10, 11, 12,
13, 14, 15, 16,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
5, 6, 7, 8,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterMatricesIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2}},
{TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 1, 1, 0, 1, 0, 0, 1, 0});
m.SetUpdates<float>({ 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape<int32_t>({2, 2, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2, 2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray({ 9, 10, 11, 12,
5, 6, 7, 8,
13, 14, 15, 16,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 2, 5}}, {TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 2, 2, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({3, 3, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5,
}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, OverlappedIndicesSummed) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 1, 0, 0, 2, 0, 2, 1, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
17, 19, 21, 23, 25,
17, 19, 21, 23, 25,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, Int32IndicesUint8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_UINT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<uint8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt32Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int32_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int32_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt64Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesBoolUpdates) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_BOOL, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<bool>({true, false, true, false});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(
m.GetOutput<bool>(),
ElementsAreArray({false, true, false, false, true, false, false, false}));
}
TEST(ScatterNdOpTest, DynamicShape) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
m.SetIndices<int32_t>({ 2, 3, 1, 0, 2, 0, 1, 2});
m.SetShape<int32_t>({3, 4, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 4, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5}));
}
TEST(ScatterNdOpTest, ReadAndWriteArrayLimits) {
ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 0, 2});
m.SetUpdates<int32_t>({1, 2, 3, 7, 9});
m.SetShape<int32_t>({5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({7, 3, 9, 2, 1}));
}
// An index past the end of the output must fail Invoke, not read/write OOB.
TEST(ScatterNdOpTest, OOBRead) {
  ScatterNdOpModel m({TensorType_INT32, {1, 1}}, {TensorType_INT32, {1}},
                     {TensorType_INT32, {1}});
  m.SetIndices<int32_t>({4});
  m.SetUpdates<int32_t>({1});
  m.SetShape<int32_t>({1});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
}
// Both negative and far-positive indices must be rejected as a whole batch.
TEST(ScatterNdOpTest, OOBWrites) {
  ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
                     {TensorType_INT32, {1}});
  m.SetIndices<int32_t>({4, 3, 1, -0x38, 0x38});
  m.SetUpdates<int32_t>({1, 2, 3, 0x44444444, 0x55555555});
  m.SetShape<int32_t>({1});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
} | TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (indices->type) {
case kTfLiteInt32:
return EvalScatterNd<int32_t>(context, indices, updates, shape, output);
default:
TF_LITE_KERNEL_LOG(
context, "Indices of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
} | TEST(ScatterNdOpTest, ScatterElementIntoVector) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<float>({9, 10, 11, 12});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({0, 11, 0, 10, 9, 0, 0, 12}));
}
TEST(ScatterNdOpTest, ScatterMatrixIntoRank3Tensor) {
  // Places two 4x4 update slices at outer indices 0 and 2 of a 4x4x4 output;
  // slices 1 and 3 remain zero-filled.
  ScatterNdOpModel model({TensorType_INT32, {2, 1}},
                         {TensorType_FLOAT32, {2, 4, 4}},
                         {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 2});
  model.SetUpdates<float>({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
                           5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8});
  model.SetShape<int32_t>({4, 4, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({4, 4, 4}));
  EXPECT_THAT(
      model.GetOutput<float>(),
      ElementsAreArray({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                        5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoMatrix) {
  // Scatters four 4-element rows to rows 9, 8, 0 and 1 of a 10x4 matrix;
  // the remaining rows stay zero.
  ScatterNdOpModel model({TensorType_INT32, {4, 1}},
                         {TensorType_FLOAT32, {4, 4}},
                         {TensorType_INT32, {2}});
  model.SetIndices<int32_t>({9, 8, 0, 1});
  model.SetUpdates<float>(
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
  model.SetShape<int32_t>({10, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({10, 4}));
  EXPECT_THAT(model.GetOutput<float>(),
              ElementsAreArray({9,  10, 11, 12,  // row 0 <- update row 2
                                13, 14, 15, 16,  // row 1 <- update row 3
                                0,  0,  0,  0,   // rows 2..7 untouched
                                0,  0,  0,  0,
                                0,  0,  0,  0,
                                0,  0,  0,  0,
                                0,  0,  0,  0,
                                0,  0,  0,  0,
                                5,  6,  7,  8,     // row 8 <- update row 1
                                1,  2,  3,  4}));  // row 9 <- update row 0
}
TEST(ScatterNdOpTest, ScatterMatricesIntoRank4Tensor) {
  // Scatters four 2x2 matrices addressed by the 2-D indices {1,1}, {0,1},
  // {0,0} and {1,0} into a 2x2x2x2 output.
  ScatterNdOpModel model({TensorType_INT32, {2, 2, 2}},
                         {TensorType_FLOAT32, {2, 2, 2, 2}},
                         {TensorType_INT32, {4}});
  model.SetIndices<int32_t>({1, 1, 0, 1, 0, 0, 1, 0});
  model.SetUpdates<float>(
      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
  model.SetShape<int32_t>({2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 2, 2}));
  EXPECT_THAT(model.GetOutput<float>(),
              ElementsAreArray(
                  {9, 10, 11, 12, 5, 6, 7, 8, 13, 14, 15, 16, 1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank4Tensor) {
  // Scatters four 5-element vectors into a 3x3x3x5 tensor using rank-3
  // indices {2,2,2}, {1,0,1}, {0,2,0} and {2,2,0}.
  ScatterNdOpModel model({TensorType_INT32, {2, 2, 3}},
                         {TensorType_FLOAT32, {2, 2, 5}},
                         {TensorType_INT32, {4}});
  model.SetIndices<int32_t>({2, 2, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0});
  model.SetUpdates<float>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                           11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({3, 3, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({3, 3, 3, 5}));
  EXPECT_THAT(model.GetOutput<float>(),
              ElementsAreArray({
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  11, 12, 13, 14, 15,  // [0,2,0]
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  6,  7,  8,  9,  10,  // [1,0,1]
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  0,  0,  0,  0,  0,
                  16, 17, 18, 19, 20,  // [2,2,0]
                  0,  0,  0,  0,  0,
                  1,  2,  3,  4,  5,   // [2,2,2]
              }));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank3Tensor) {
  // Scatters four 5-element rows into a 2x3x5 tensor at {0,0}, {1,0},
  // {0,2} and {1,2}.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}},
                         {TensorType_FLOAT32, {4, 5}},
                         {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 0, 1, 0, 0, 2, 1, 2});
  model.SetUpdates<float>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                           11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<float>(),
              ElementsAreArray({1,  2,  3,  4,  5,
                                0,  0,  0,  0,  0,
                                11, 12, 13, 14, 15,
                                6,  7,  8,  9,  10,
                                0,  0,  0,  0,  0,
                                16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, OverlappedIndicesSummed) {
  // Indices {1,0} and {0,2} each appear twice, so the two update rows aimed
  // at the same slot are accumulated (summed), not overwritten.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}},
                         {TensorType_FLOAT32, {4, 5}},
                         {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({1, 0, 0, 2, 0, 2, 1, 0});
  model.SetUpdates<float>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                           11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<float>(),
              ElementsAreArray({0,  0,  0,  0,  0,
                                0,  0,  0,  0,  0,
                                17, 19, 21, 23, 25,
                                17, 19, 21, 23, 25,
                                0,  0,  0,  0,  0,
                                0,  0,  0,  0,  0}));
}
TEST(ScatterNdOpTest, Int32IndicesUint8Updates) {
  // Same scatter pattern as ScatterVectorIntoRank3Tensor, uint8 payload.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}},
                         {TensorType_UINT8, {4, 5}}, {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 0, 1, 0, 0, 2, 1, 2});
  model.SetUpdates<uint8_t>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<uint8_t>(),
              ElementsAreArray({1,  2,  3,  4,  5,
                                0,  0,  0,  0,  0,
                                11, 12, 13, 14, 15,
                                6,  7,  8,  9,  10,
                                0,  0,  0,  0,  0,
                                16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt8Updates) {
  // Same scatter pattern as ScatterVectorIntoRank3Tensor, int8 payload.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}}, {TensorType_INT8, {4, 5}},
                         {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 0, 1, 0, 0, 2, 1, 2});
  model.SetUpdates<int8_t>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                            11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<int8_t>(),
              ElementsAreArray({1,  2,  3,  4,  5,
                                0,  0,  0,  0,  0,
                                11, 12, 13, 14, 15,
                                6,  7,  8,  9,  10,
                                0,  0,  0,  0,  0,
                                16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt32Updates) {
  // Same scatter pattern as ScatterVectorIntoRank3Tensor, int32 payload.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}},
                         {TensorType_INT32, {4, 5}}, {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 0, 1, 0, 0, 2, 1, 2});
  model.SetUpdates<int32_t>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<int32_t>(),
              ElementsAreArray({1,  2,  3,  4,  5,
                                0,  0,  0,  0,  0,
                                11, 12, 13, 14, 15,
                                6,  7,  8,  9,  10,
                                0,  0,  0,  0,  0,
                                16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt64Updates) {
  // Same scatter pattern as ScatterVectorIntoRank3Tensor, int64 payload.
  ScatterNdOpModel model({TensorType_INT32, {4, 2}},
                         {TensorType_INT64, {4, 5}}, {TensorType_INT32, {3}});
  model.SetIndices<int32_t>({0, 0, 1, 0, 0, 2, 1, 2});
  model.SetUpdates<int64_t>({1,  2,  3,  4,  5,  6,  7,  8,  9,  10,
                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
  model.SetShape<int32_t>({2, 3, 5});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 3, 5}));
  EXPECT_THAT(model.GetOutput<int64_t>(),
              ElementsAreArray({1,  2,  3,  4,  5,
                                0,  0,  0,  0,  0,
                                11, 12, 13, 14, 15,
                                6,  7,  8,  9,  10,
                                0,  0,  0,  0,  0,
                                16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesBoolUpdates) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_BOOL, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<bool>({true, false, true, false});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(
m.GetOutput<bool>(),
ElementsAreArray({false, true, false, false, true, false, false, false}));
} |
#include "util/coding.h"
namespace leveldb {
// Appends the 4-byte little-endian fixed encoding of `value` to `*dst`.
void PutFixed32(std::string* dst, uint32_t value) {
  char encoded[sizeof(uint32_t)];
  EncodeFixed32(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Appends the 8-byte little-endian fixed encoding of `value` to `*dst`.
void PutFixed64(std::string* dst, uint64_t value) {
  char encoded[sizeof(uint64_t)];
  EncodeFixed64(encoded, value);
  dst->append(encoded, sizeof(encoded));
}
// Writes `v` to `dst` as a base-128 varint (7 payload bits per byte,
// little-endian, high bit set on every byte except the last) and returns a
// pointer one past the final byte written. `dst` must have room for up to
// 5 bytes. Produces byte-for-byte the same output as the unrolled original.
char* EncodeVarint32(char* dst, uint32_t v) {
  uint8_t* out = reinterpret_cast<uint8_t*>(dst);
  while (v >= 0x80) {
    *out++ = static_cast<uint8_t>(v) | 0x80;  // low 7 bits + continuation bit
    v >>= 7;
  }
  *out++ = static_cast<uint8_t>(v);  // final byte: high bit clear
  return reinterpret_cast<char*>(out);
}
// Appends the varint32 encoding of `v` (1..5 bytes) to `*dst`.
void PutVarint32(std::string* dst, uint32_t v) {
  char scratch[5];
  char* end = EncodeVarint32(scratch, v);
  dst->append(scratch, end - scratch);
}
// Writes `v` to `dst` as a base-128 varint and returns a pointer one past
// the last byte written. `dst` must have room for up to 10 bytes.
char* EncodeVarint64(char* dst, uint64_t v) {
  uint8_t* out = reinterpret_cast<uint8_t*>(dst);
  for (; v >= 0x80; v >>= 7) {
    *out++ = static_cast<uint8_t>(v) | 0x80;  // continuation bit set
  }
  *out++ = static_cast<uint8_t>(v);
  return reinterpret_cast<char*>(out);
}
// Appends the varint64 encoding of `v` (1..10 bytes) to `*dst`.
void PutVarint64(std::string* dst, uint64_t v) {
  char scratch[10];
  char* end = EncodeVarint64(scratch, v);
  dst->append(scratch, end - scratch);
}
// Appends `value` to `*dst` as a varint32 length prefix followed by the
// slice's raw bytes. Decoded by GetLengthPrefixedSlice().
void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
  PutVarint32(dst, value.size());
  dst->append(value.data(), value.size());
}
// Returns how many bytes the varint encoding of `v` occupies (1..10).
int VarintLength(uint64_t v) {
  int bytes = 1;  // even v == 0 takes one byte
  for (v >>= 7; v != 0; v >>= 7) {
    bytes++;
  }
  return bytes;
}
// Decodes a varint32 from [p, limit). On success stores the value in *value
// and returns the first unread byte; returns nullptr if the input is
// truncated or the encoding exceeds 5 bytes (shift past 28).
const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                   uint32_t* value) {
  uint32_t decoded = 0;
  uint32_t shift = 0;
  while (p < limit && shift <= 28) {
    const uint32_t byte = static_cast<uint8_t>(*p++);
    if (byte < 128) {
      // Terminating byte: high bit clear.
      *value = decoded | (byte << shift);
      return p;
    }
    decoded |= (byte & 127) << shift;
    shift += 7;
  }
  return nullptr;
}
// Decodes a varint32 from the front of *input, advancing the slice past the
// consumed bytes. Returns false (leaving *input unchanged) on bad data.
bool GetVarint32(Slice* input, uint32_t* value) {
  const char* start = input->data();
  const char* end = start + input->size();
  const char* next = GetVarint32Ptr(start, end, value);
  if (next == nullptr) {
    return false;
  }
  *input = Slice(next, end - next);
  return true;
}
// Decodes a varint64 from [p, limit). On success stores the value in *value
// and returns the first unread byte; returns nullptr if the input is
// truncated or the encoding exceeds 10 bytes (shift past 63).
const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
  uint64_t decoded = 0;
  uint32_t shift = 0;
  while (p < limit && shift <= 63) {
    const uint64_t byte = static_cast<uint8_t>(*p++);
    if (byte < 128) {
      // Terminating byte: high bit clear.
      *value = decoded | (byte << shift);
      return p;
    }
    decoded |= (byte & 127) << shift;
    shift += 7;
  }
  return nullptr;
}
// Decodes a varint64 from the front of *input, advancing the slice past the
// consumed bytes. Returns false (leaving *input unchanged) on bad data.
bool GetVarint64(Slice* input, uint64_t* value) {
  const char* start = input->data();
  const char* end = start + input->size();
  const char* next = GetVarint64Ptr(start, end, value);
  if (next == nullptr) {
    return false;
  }
  *input = Slice(next, end - next);
  return true;
}
// Reads a varint32 length followed by that many bytes from *input into
// *result, consuming both. Returns false if the length prefix is malformed
// or the payload is shorter than the declared length.
bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
  uint32_t len;
  if (!GetVarint32(input, &len) || input->size() < len) {
    return false;
  }
  *result = Slice(input->data(), len);
  input->remove_prefix(len);
  return true;
}
} | #include "util/coding.h"
#include <vector>
#include "gtest/gtest.h"
namespace leveldb {
TEST(Coding, Fixed32) {
  // Round-trips 100k consecutive values through the fixed32 encoding.
  std::string encoded;
  for (uint32_t v = 0; v < 100000; v++) {
    PutFixed32(&encoded, v);
  }
  const char* cursor = encoded.data();
  for (uint32_t v = 0; v < 100000; v++) {
    ASSERT_EQ(v, DecodeFixed32(cursor));
    cursor += sizeof(uint32_t);
  }
}
TEST(Coding, Fixed64) {
  // Encodes values around every power of two, then decodes them in order.
  std::string encoded;
  for (int power = 0; power <= 63; power++) {
    const uint64_t v = static_cast<uint64_t>(1) << power;
    PutFixed64(&encoded, v - 1);
    PutFixed64(&encoded, v + 0);
    PutFixed64(&encoded, v + 1);
  }
  const char* cursor = encoded.data();
  for (int power = 0; power <= 63; power++) {
    const uint64_t v = static_cast<uint64_t>(1) << power;
    ASSERT_EQ(v - 1, DecodeFixed64(cursor));
    cursor += sizeof(uint64_t);
    ASSERT_EQ(v + 0, DecodeFixed64(cursor));
    cursor += sizeof(uint64_t);
    ASSERT_EQ(v + 1, DecodeFixed64(cursor));
    cursor += sizeof(uint64_t);
  }
}
TEST(Coding, EncodingOutput) {
  // Fixed-width encodings must be little-endian byte sequences.
  std::string dst;
  PutFixed32(&dst, 0x04030201);
  ASSERT_EQ(4, dst.size());
  for (int i = 0; i < 4; i++) {
    ASSERT_EQ(i + 1, static_cast<int>(dst[i]));
  }
  dst.clear();
  PutFixed64(&dst, 0x0807060504030201ull);
  ASSERT_EQ(8, dst.size());
  for (int i = 0; i < 8; i++) {
    ASSERT_EQ(i + 1, static_cast<int>(dst[i]));
  }
}
TEST(Coding, Varint32) {
  // Encodes (i/32) << (i%32) for i in [0, 1024) and decodes them back,
  // also checking that VarintLength agrees with the bytes consumed.
  std::string encoded;
  for (uint32_t i = 0; i < (32 * 32); i++) {
    PutVarint32(&encoded, (i / 32) << (i % 32));
  }
  const char* cursor = encoded.data();
  const char* end = cursor + encoded.size();
  for (uint32_t i = 0; i < (32 * 32); i++) {
    const uint32_t expected = (i / 32) << (i % 32);
    uint32_t decoded;
    const char* start = cursor;
    cursor = GetVarint32Ptr(cursor, end, &decoded);
    ASSERT_TRUE(cursor != nullptr);
    ASSERT_EQ(expected, decoded);
    ASSERT_EQ(VarintLength(decoded), cursor - start);
  }
  ASSERT_EQ(cursor, encoded.data() + encoded.size());
}
TEST(Coding, Varint64) {
  // Boundary values: 0, a small value, the max, and every power of two
  // together with its neighbors.
  std::vector<uint64_t> values = {0, 100, ~static_cast<uint64_t>(0),
                                  ~static_cast<uint64_t>(0) - 1};
  for (uint32_t k = 0; k < 64; k++) {
    const uint64_t power = 1ull << k;
    values.push_back(power);
    values.push_back(power - 1);
    values.push_back(power + 1);
  }
  std::string encoded;
  for (uint64_t v : values) {
    PutVarint64(&encoded, v);
  }
  const char* cursor = encoded.data();
  const char* end = cursor + encoded.size();
  for (uint64_t expected : values) {
    ASSERT_TRUE(cursor < end);
    uint64_t decoded;
    const char* start = cursor;
    cursor = GetVarint64Ptr(cursor, end, &decoded);
    ASSERT_TRUE(cursor != nullptr);
    ASSERT_EQ(expected, decoded);
    ASSERT_EQ(VarintLength(decoded), cursor - start);
  }
  ASSERT_EQ(cursor, end);
}
TEST(Coding, Varint32Overflow) {
  // A 6-byte encoding cannot fit in 32 bits and must fail to decode.
  std::string input("\x81\x82\x83\x84\x85\x11");
  uint32_t result;
  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}
TEST(Coding, Varint32Truncation) {
  // Every strict prefix of a valid encoding must be rejected; the full
  // encoding must decode to the original value.
  const uint32_t large_value = (1u << 31) + 100;
  std::string encoded;
  PutVarint32(&encoded, large_value);
  uint32_t result;
  for (size_t len = 0; len + 1 < encoded.size(); len++) {
    ASSERT_TRUE(GetVarint32Ptr(encoded.data(), encoded.data() + len,
                               &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint32Ptr(encoded.data(), encoded.data() + encoded.size(),
                             &result) != nullptr);
  ASSERT_EQ(large_value, result);
}
TEST(Coding, Varint64Overflow) {
  // An 11-byte encoding cannot fit in 64 bits and must fail to decode.
  std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
  uint64_t result;
  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
                             &result) == nullptr);
}
TEST(Coding, Varint64Truncation) {
  // Every strict prefix of a valid encoding must be rejected; the full
  // encoding must decode to the original value.
  const uint64_t large_value = (1ull << 63) + 100ull;
  std::string encoded;
  PutVarint64(&encoded, large_value);
  uint64_t result;
  for (size_t len = 0; len + 1 < encoded.size(); len++) {
    ASSERT_TRUE(GetVarint64Ptr(encoded.data(), encoded.data() + len,
                               &result) == nullptr);
  }
  ASSERT_TRUE(GetVarint64Ptr(encoded.data(), encoded.data() + encoded.size(),
                             &result) != nullptr);
  ASSERT_EQ(large_value, result);
}
TEST(Coding, Strings) {
  // Length-prefixed slices round-trip in order and consume exactly their
  // own bytes, leaving the input empty at the end.
  std::string encoded;
  PutLengthPrefixedSlice(&encoded, Slice(""));
  PutLengthPrefixedSlice(&encoded, Slice("foo"));
  PutLengthPrefixedSlice(&encoded, Slice("bar"));
  PutLengthPrefixedSlice(&encoded, Slice(std::string(200, 'x')));
  Slice input(encoded);
  Slice v;
  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
  ASSERT_EQ("", v.ToString());
  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
  ASSERT_EQ("foo", v.ToString());
  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
  ASSERT_EQ("bar", v.ToString());
  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
  ASSERT_EQ(std::string(200, 'x'), v.ToString());
  ASSERT_EQ("", input.ToString());
}
} | void PutVarint64(std::string* dst, uint64_t v) {
char buf[10];
char* ptr = EncodeVarint64(buf, v);
dst->append(buf, ptr - buf);
} | TEST(Coding, Varint64) {
std::vector<uint64_t> values;
values.push_back(0);
values.push_back(100);
values.push_back(~static_cast<uint64_t>(0));
values.push_back(~static_cast<uint64_t>(0) - 1);
for (uint32_t k = 0; k < 64; k++) {
const uint64_t power = 1ull << k;
values.push_back(power);
values.push_back(power - 1);
values.push_back(power + 1);
}
std::string s;
for (size_t i = 0; i < values.size(); i++) {
PutVarint64(&s, values[i]);
}
const char* p = s.data();
const char* limit = p + s.size();
for (size_t i = 0; i < values.size(); i++) {
ASSERT_TRUE(p < limit);
uint64_t actual;
const char* start = p;
p = GetVarint64Ptr(p, limit, &actual);
ASSERT_TRUE(p != nullptr);
ASSERT_EQ(values[i], actual);
ASSERT_EQ(VarintLength(actual), p - start);
}
ASSERT_EQ(p, limit);
}
TEST(Coding, Varint64Truncation) {
uint64_t large_value = (1ull << 63) + 100ull;
std::string s;
PutVarint64(&s, large_value);
uint64_t result;
for (size_t len = 0; len < s.size() - 1; len++) {
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
}
ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
nullptr);
ASSERT_EQ(large_value, result);
} |
#include "xla/service/gpu/autotuner_util.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA256.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr int kVersion = 3;
}
using AutotuneCacheMap = absl::flat_hash_map<AutotuneCacheKey, AutotuneResult>;
static absl::Mutex autotune_cache_mu(absl::kConstInit);
static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) =
*new AutotuneCacheMap();
// Returns the base64 encoding of the SHA-256 digest of `s`. Used below to
// derive cache file names from (arbitrarily long) autotune cache keys.
absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s) {
  llvm::SHA256 sha256;
  sha256.update(llvm::StringRef(s));
  std::array<uint8_t, 32> hash = sha256.final();
  // Wrap the raw digest bytes without copying before encoding.
  absl::string_view hash_view(reinterpret_cast<const char*>(hash.data()),
                              hash.size());
  std::string base64_encoded_hash;
  TF_RETURN_IF_ERROR(tsl::Base64Encode(hash_view, &base64_encoded_hash));
  return base64_encoded_hash;
}
namespace {
// Returns "<cache_dir>/<base64(sha256(key))>.textproto" — the on-disk
// location of the cached result for `key`. Fails if `cache_dir` is empty.
absl::StatusOr<std::string> GetCacheFilePath(absl::string_view cache_dir,
                                             const AutotuneCacheKey& key) {
  if (cache_dir.empty()) {
    return absl::InvalidArgumentError("autotune_cache_dir should not be empty");
  }
  // Hash the key so arbitrary key contents never appear in the filename.
  TF_ASSIGN_OR_RETURN(std::string key_hash,
                      GetBase64EncodedSha256Hash(key.ToString()));
  return tsl::io::JoinPath(cache_dir, absl::StrCat(key_hash, ".textproto"));
}
// Return value of the cache-insertion helpers below.
struct ResultAndInserted {
  // The result now present in the cache (the pre-existing value if the key
  // was already cached).
  AutotuneResult result;
  // True iff this call inserted a new entry.
  bool inserted;
};
// Inserts (key, result) into the in-memory cache. On a key collision the
// previously cached value wins and `inserted` is false.
ResultAndInserted AddResultToInMemoryCache(const AutotuneCacheKey& key,
                                           AutotuneResult result)
    ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  absl::MutexLock guard(&autotune_cache_mu);
  auto [entry, inserted] = autotune_cache.emplace(key, std::move(result));
  return {entry->second, inserted};
}
// Persists `result` for `key` under `cache_dir` as a textproto file; no-op
// when the file cache is disabled (empty `cache_dir`).
absl::Status AddResultToFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
                                                AutotuneResult result,
                                                std::string_view cache_dir)
    ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  if (cache_dir.empty()) {
    return absl::OkStatus();
  }
  TF_ASSIGN_OR_RETURN(const std::string file_path,
                      GetCacheFilePath(cache_dir, key));
  VLOG(1) << "Writing autotune result to file: " << file_path;
  std::string result_str;
  if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) {
    return absl::InternalError("Failed to serialize autotune result.");
  }
  // Write to a temp file first, then rename into place so concurrent
  // readers never observe a partially written cache file.
  std::string temp_file_path = tsl::io::GetTempFilename(".textproto");
  tsl::Env* default_env = tsl::Env::Default();
  TF_RETURN_IF_ERROR(
      tsl::WriteStringToFile(default_env, temp_file_path, result_str));
  return default_env->RenameFile(temp_file_path, file_path);
}
// Inserts the result into the in-memory cache and, only when that insertion
// was new, mirrors it to the file-based cache (if enabled). Repeated adds of
// the same key therefore never rewrite the cache file.
absl::StatusOr<ResultAndInserted> AddResultToCaches(const AutotuneCacheKey& key,
                                                    AutotuneResult result,
                                                    std::string_view cache_dir)
    ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  ResultAndInserted stored = AddResultToInMemoryCache(key, result);
  if (stored.inserted) {
    TF_RETURN_IF_ERROR(
        AddResultToFileBasedCacheIfEnabled(key, stored.result, cache_dir));
  }
  return stored;
}
// Returns a copy of the cached result for `key`, or nullopt on a miss.
std::optional<AutotuneResult> TryToFindInInMemoryCache(
    const AutotuneCacheKey& key) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  absl::MutexLock guard(&autotune_cache_mu);
  auto entry = autotune_cache.find(key);
  if (entry == autotune_cache.end()) {
    return std::nullopt;
  }
  return entry->second;
}
// Looks for a cached result for `key` on disk. Returns nullopt when the file
// cache is disabled (empty `cache_dir`) or when no cache file exists; errors
// only if a file exists but cannot be read or parsed.
absl::StatusOr<std::optional<AutotuneResult>>
TryToFindInFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
                                   absl::string_view cache_dir)
    ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  if (cache_dir.empty()) {
    return std::nullopt;
  }
  TF_ASSIGN_OR_RETURN(const std::string file_path,
                      GetCacheFilePath(cache_dir, key));
  if (!tsl::Env::Default()->FileExists(file_path).ok()) {
    VLOG(1) << "Autotune result file not found: " << file_path;
    return std::nullopt;
  }
  VLOG(1) << "Autotune result file found: " << file_path;
  std::string autotune_result_str;
  TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), file_path,
                                           &autotune_result_str));
  AutotuneResult result;
  if (!tsl::protobuf::TextFormat::ParseFromString(autotune_result_str,
                                                  &result)) {
    return absl::InvalidArgumentError("Failed to parse autotune result.");
  }
  return result;
}
// Sorts entries by (device, hlo) so serialized results are deterministic.
void SortAutotuneResults(AutotuneResults* results) {
  std::sort(results->mutable_results()->pointer_begin(),
            results->mutable_results()->pointer_end(),
            [](const auto* a, const auto* b) {
              return std::make_pair(absl::string_view(a->device()),
                                    absl::string_view(a->hlo())) <
                     std::make_pair(absl::string_view(b->device()),
                                    absl::string_view(b->hlo()));
            });
}
}
// Serializes `results` either as a textproto (when `as_textproto` is true)
// or as the binary proto wire format.
absl::StatusOr<std::string> AutotuneResultsToString(
    const AutotuneResults& results, bool as_textproto) {
  if (!as_textproto) {
    return results.SerializeAsString();
  }
  std::string textproto;
  if (!tsl::protobuf::TextFormat::PrintToString(results, &textproto)) {
    return Internal("Failed to serialize autotune results.");
  }
  return textproto;
}
namespace {
// Flattens one cache entry — the (device, hlo) key plus its result — into a
// new entry appended to `results`.
void SerializeAutotuneEntry(AutotuneResults* results, const AutotuneCacheKey& k,
                            const AutotuneResult* res) {
  auto* entry = results->add_results();
  entry->set_device(std::string(k.GetModelStr()));
  entry->set_hlo(std::string(k.GetHlo()));
  *entry->mutable_result() = *res;
}
// Snapshots the entire in-memory cache into `results`, stamping the current
// version and sorting entries so the output is deterministic.
absl::Status AutotunerUtil::SerializeAutotuneResults(
    AutotuneResults* results) {
  absl::MutexLock guard(&autotune_cache_mu);
  for (const auto& [key, result] : autotune_cache) {
    SerializeAutotuneEntry(results, key, &result);
  }
  results->set_version(kVersion);
  SortAutotuneResults(results);
  return absl::OkStatus();
}
// Loads all entries of `results` into the in-memory cache. Fails with an
// internal error if the same (device, hlo) key appears twice.
absl::Status AutotunerUtil::LoadAutotuneResults(
    const AutotuneResults& results) {
  absl::MutexLock lock(&autotune_cache_mu);
  for (const AutotuneResults::Entry& result : results.results()) {
    if (auto [it, inserted] = autotune_cache.emplace(
            AutotuneCacheKey(result.device(), result.hlo()), result.result());
        !inserted) {
      return absl::InternalError(absl::StrCat(
          "Duplicate autotuning result for ", it->first.ToString()));
    }
  }
  return absl::OkStatus();
}
// Drops every entry from the in-memory cache (files on disk are untouched).
void AutotunerUtil::ClearAutotuneResults() {
  absl::MutexLock guard(&autotune_cache_mu);
  autotune_cache.clear();
}
// True iff the in-memory cache currently holds no results.
bool AutotunerUtil::ResultCacheIsEmpty() {
  absl::MutexLock guard(&autotune_cache_mu);
  return autotune_cache.empty();
}
// Allocates a device buffer sized for `shape` from `allocator`, optionally
// filling it with pseudo-random data (advancing `rng_state`) when the config
// requests initialized buffers.
absl::StatusOr<se::DeviceMemoryBase> AutotunerUtil::CreateBuffer(
    se::RedzoneAllocator& allocator, const Shape& shape,
    const AutotuneConfig& config, int64_t& rng_state) {
  TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase buffer,
                      allocator.AllocateBytes(ShapeUtil::ByteSizeOf(shape)));
  if (config.should_init_buffers()) {
    InitializeBuffer(allocator.stream(), shape.element_type(), &rng_state,
                     buffer);
  }
  return buffer;
}
namespace {
// Renders `instr` in a canonical textual form for use as a cache key. For
// fusions, prints the fused computation (with ids and non-essential details
// suppressed) so that equivalent fusions map to the same key.
std::string ToCanonicalString(const HloInstruction* instr) {
  auto options = HloPrintOptions::Canonical();
  if (instr->opcode() != HloOpcode::kFusion) {
    options.set_print_backend_config(true);
    return instr->ToString(options);
  }
  options.set_print_subcomputation_mode(
      HloPrintOptions::PrintSubcomputationMode::kOff);
  options.set_print_infeed_outfeed_config(false);
  options.set_print_only_essential_constants(true);
  options.set_print_operand_shape(true);
  options.set_print_ids(false);
  options.set_canonicalize_computations(true);
  // For a fusion the key is the fused computation, not the fusion op itself.
  return instr->called_computations()[0]->ToString(options);
}
}
// Builds a cache key from the device model string and the canonical string
// form of `instr` (see ToCanonicalString above).
AutotuneCacheKey::AutotuneCacheKey(absl::string_view model_str,
                                   const HloInstruction& instr)
    : AutotuneCacheKey(model_str, ToCanonicalString(&instr)) {}
namespace {
// Looks up `key` in the in-memory cache and, failing that, in the file-based
// cache (when enabled); a file-cache hit is promoted into the in-memory
// cache. Returns nullopt on a miss in both caches.
//
// Fix: the original logging tested VLOG_IS_ON(1) before VLOG_IS_ON(2).
// Since vlog level 2 implies level 1, the "else if (VLOG_IS_ON(2))" branches
// were unreachable and the cache key was never logged. Check the more
// verbose level first so level >= 2 logs the key and level 1 logs the short
// message, as intended.
absl::StatusOr<std::optional<AutotuneResult>> TryFindInCache(
    const AutotuneCacheKey& key, absl::string_view cache_dir)
    ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
  std::optional<AutotuneResult> opt_result = TryToFindInInMemoryCache(key);
  if (opt_result.has_value()) {
    if (VLOG_IS_ON(2)) {
      LOG(INFO) << "In-memory autotune cache hit: key = " << key.ToString();
    } else if (VLOG_IS_ON(1)) {
      LOG(INFO) << "In-memory autotune cache hit";
    }
    return opt_result;
  }
  TF_ASSIGN_OR_RETURN(opt_result,
                      TryToFindInFileBasedCacheIfEnabled(key, cache_dir));
  if (opt_result.has_value()) {
    // Promote the file-cache hit so later lookups stay in memory.
    AddResultToInMemoryCache(key, opt_result.value());
    if (VLOG_IS_ON(2)) {
      LOG(INFO) << "File-based autotune cache hit: key = " << key.ToString();
    } else if (VLOG_IS_ON(1)) {
      LOG(INFO) << "File-based autotune cache hit";
    }
    return opt_result;
  }
  if (VLOG_IS_ON(2)) {
    LOG(INFO) << "Autotune cache miss: key = " << key.ToString();
  } else if (VLOG_IS_ON(1)) {
    LOG(INFO) << "Autotune cache miss";
  }
  return std::nullopt;
}
}
// Builds the cache key for `instr`: the config's device model string plus
// the instruction's canonical form.
AutotuneCacheKey AutotunerUtil::GetKey(
    const HloInstruction* instr, const AutotuneConfig& config) {
  return AutotuneCacheKey(config.GetModelStr(), *instr);
}
// True iff a result for `key` exists in either cache. Note: a file-cache hit
// is promoted into memory as a side effect of TryFindInCache.
absl::StatusOr<bool> AutotunerUtil::IsInCache(
    const AutotuneCacheKey& key, const AutotuneConfig& config) {
  TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> found,
                      TryFindInCache(key, config.autotune_cache_dir()));
  return found.has_value();
}
// Adds `result` for `key` to both caches; returns true only if this call
// actually inserted a new entry (false when the key was already cached).
absl::StatusOr<bool> AutotunerUtil::AddResult(
    const AutotuneCacheKey& key, AutotuneResult result,
    const AutotuneConfig& config) {
  TF_ASSIGN_OR_RETURN(
      ResultAndInserted stored,
      AddResultToCaches(key, std::move(result), config.autotune_cache_dir()));
  return stored.inserted;
}
// Returns the cached autotune result for `instr` if present; otherwise runs
// `autotune_fn` and caches its result. Fails with NotFound when the config
// demands complete ahead-of-time results and the key is missing.
absl::StatusOr<AutotuneResult> AutotunerUtil::Autotune(
    const HloInstruction* instr, const AutotuneConfig& config,
    const AutotuneNoCacheFn& autotune_fn) {
  const AutotuneCacheKey key = GetKey(instr, config);
  TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res,
                      TryFindInCache(key, config.autotune_cache_dir()));
  if (opt_res.has_value()) {
    return opt_res.value();
  }
  if (config.should_require_complete_aot_autotune_results()) {
    return NotFound(
        "Complete XLA AOT autotuning results are required, but no AOT result "
        "was found for key: %s",
        key.ToString());
  }
  TF_ASSIGN_OR_RETURN(AutotuneResult autotune_result, autotune_fn());
  // If another thread raced us and inserted first, its result is returned.
  TF_ASSIGN_OR_RETURN(ResultAndInserted result_and_inserted,
                      AddResultToCaches(key, std::move(autotune_result),
                                        config.autotune_cache_dir()));
  return result_and_inserted.result;
}
namespace {
// True iff `file_path` has one of the conventional textproto extensions.
bool IsTextProtoPath(absl::string_view file_path) {
  for (absl::string_view ext : {".txt", ".textproto", ".prototxt", ".pbtxt"}) {
    if (absl::EndsWith(file_path, ext)) {
      return true;
    }
  }
  return false;
}
}
// Parses `data` (textproto or binary proto, per `as_textproto`), checks the
// version stamp, and loads the entries into the in-memory cache.
absl::Status AutotunerUtil::LoadAutotuneResults(
    absl::string_view data, bool as_textproto) {
  AutotuneResults results;
  bool parse_success =
      as_textproto ? tsl::protobuf::TextFormat::ParseFromString(
                         std::string(data), &results)
                   : results.ParseFromString(std::string(data));
  if (!parse_success) {
    return absl::InvalidArgumentError(
        "Failed to parse autotune results string.");
  }
  // Reject results produced by a different serialization version.
  if (results.version() != kVersion) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "Version mismatch in autotune results. Expected %d but was %d",
        kVersion, results.version()));
  }
  TF_RETURN_IF_ERROR(LoadAutotuneResults(results));
  return absl::OkStatus();
}
// Serializes the entire in-memory cache to a string, textproto or binary.
absl::StatusOr<std::string> AutotunerUtil::SerializeAutotuneResults(
    bool as_textproto) {
  AutotuneResults proto;
  TF_RETURN_IF_ERROR(SerializeAutotuneResults(&proto));
  return AutotuneResultsToString(proto, as_textproto);
}
// Writes `results` to `file_path`; the extension picks textproto vs binary.
// Requires a non-empty path and a version-stamped proto (i.e. one produced
// by SerializeAutotuneResults).
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
    const AutotuneResults& results, absl::string_view file_path) {
  TF_RET_CHECK(!file_path.empty());
  TF_RET_CHECK(results.version() > 0)
      << "Did you call SerializeAutotuneResults to get this AutotuneResults?";
  std::string resolved_path;
  if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
    return FailedPrecondition("File path can not be resolved: %s", file_path);
  }
  TF_ASSIGN_OR_RETURN(
      std::string autotune_results_str,
      AutotuneResultsToString(results, IsTextProtoPath(resolved_path)));
  TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), resolved_path,
                                            autotune_results_str));
  LOG(INFO) << "Autotune results serialized to file: " << resolved_path;
  return absl::OkStatus();
}
// Convenience overload: snapshots the in-memory cache, then writes it to
// `file_path`.
absl::Status AutotunerUtil::SerializeAutotuneResultsToFile(
    absl::string_view file_path) {
  AutotuneResults proto;
  TF_RETURN_IF_ERROR(SerializeAutotuneResults(&proto));
  return SerializeAutotuneResultsToFile(proto, file_path);
}
// Reads autotune results from `file_path` (textproto or binary, chosen by
// extension) and loads them into the in-memory cache.
absl::Status AutotunerUtil::LoadAutotuneResultsFromFile(
    absl::string_view file_path) {
  TF_RET_CHECK(!file_path.empty());
  std::string resolved_path;
  if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) {
    return FailedPrecondition("File path can not be resolved: %s", file_path);
  }
  if (!tsl::Env::Default()->FileExists(resolved_path).ok()) {
    return FailedPrecondition("Autotune results file does not exist: %s",
                              resolved_path);
  }
  std::string autotune_results_str;
  TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), resolved_path,
                                           &autotune_results_str));
  TF_RETURN_IF_ERROR(LoadAutotuneResults(autotune_results_str,
                                         IsTextProtoPath(resolved_path)));
  LOG(INFO) << "Autotune results loaded from file: " << resolved_path;
  return absl::OkStatus();
}
// Builds a redzone allocator on the config's stream. Redzone padding is only
// allocated when correctness checking is enabled; otherwise padding is zero.
absl::StatusOr<se::RedzoneAllocator>
AutotunerUtil::CreateRedzoneAllocator(const AutotuneConfig& config,
                                      const DebugOptions& opts) {
  TF_ASSIGN_OR_RETURN(se::Stream * stream, config.GetStream());
  return se::RedzoneAllocator(
      stream, config.GetAllocator(), PtxOptsFromDebugOptions(opts),
      std::numeric_limits<int64_t>::max(),
      config.should_check_correctness()
          ? opts.xla_gpu_redzone_padding_bytes()
          : 0);
}
}
} | #include "xla/service/gpu/autotuner_util.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;
using ::tsl::testing::StatusIs;
// Common fixture for AutotunerUtil tests. Clears the process-wide autotune
// result cache before every test so tests do not observe each other's state.
class AutotunerUtilTest : public HloTestBase {
 protected:
  // Small HLO module containing a dot (the subject of autotuning).
  static constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
  p0 = f16[1,16,17,3] parameter(0)
  p1 = s8[16,17,3] parameter(1)
  cp1 = f16[16,17,3] convert(p1)
  ROOT _ = f16[1,16,16] dot(p0, cp1),
    lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
})";
  // A serialized AutotuneResults textproto with one triton result, used to
  // pre-populate the in-memory cache via PopulateResultCache().
  static constexpr absl::string_view kResultText = R"(
version: 3
results {
  device: "sm_8.0 with 42331013120B RAM, 108 cores, 1410000KHz clock, 1215000KHz mem clock, 41943040B L2$"
  hlo: "{\n tmp_0 = f16[1,16,17,3]{3,2,1,0} parameter(0)\n tmp_1 = f16[16,51]{1,0} bitcast(f16[1,16,17,3]{3,2,1,0} tmp_0)\n tmp_2 = s8[16,17,3]{2,1,0} parameter(1)\n tmp_3 = s8[51,16]{0,1} bitcast(s8[16,17,3]{2,1,0} tmp_2)\n tmp_4 = f16[51,16]{0,1} convert(s8[51,16]{0,1} tmp_3)\n tmp_5 = f16[16,16]{1,0} dot(f16[16,51]{1,0} tmp_1, f16[51,16]{0,1} tmp_4), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tmp_6 = f16[1,16,16]{2,1,0} bitcast(f16[16,16]{1,0} tmp_5)\n}"
  result {
    run_time {
      nanos: 31744
    }
    triton {
      block_m: 32
      block_n: 32
      block_k: 32
      split_k: 1
      num_stages: 1
      num_warps: 4
      num_ctas: 1
    }
  }
})";
  void SetUp() override { AutotunerUtil::ClearAutotuneResults(); }
  // Returns a fresh, unique file name in the test temp dir ending in `suffix`.
  std::string GetUniqueTempFilePath(absl::string_view suffix) {
    std::string filename = TempDir();
    CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,
                                                    std::string(suffix)));
    return filename;
  }
  // Reads `file_path` and expects its contents to be non-empty.
  std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {
    std::string str;
    tsl::Env* env = tsl::Env::Default();
    TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));
    EXPECT_THAT(str, Not(IsEmpty()));
    return str;
  }
  // Creates a StreamExecutor on the "Host" platform (no GPU needed in tests).
  static std::unique_ptr<stream_executor::StreamExecutor> NewStreamExecutor() {
    stream_executor::Platform* platform =
        stream_executor::PlatformManager::PlatformWithName("Host").value();
    stream_executor::StreamExecutorConfig config(0);
    return platform->GetUncachedExecutor(config).value();
  }
  // Loads kResultText into the (asserted-empty) in-memory cache.
  absl::Status PopulateResultCache() {
    EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
    TF_RETURN_IF_ERROR(AutotunerUtil::LoadAutotuneResults(kResultText, true));
    EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
    return absl::OkStatus();
  }
};
// A ".txt" path serializes as textproto and the result set is non-empty.
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto1) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".txt");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
  AutotuneResults results;
  EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
                                                         &results));
  EXPECT_GT(results.results_size(), 0);
}
// A ".textproto" path is also treated as text format.
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto2) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".textproto");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
  AutotuneResults results;
  EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,
                                                         &results));
}
// A ".pb" path serializes in binary proto wire format.
TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_Protobuf) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".pb");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath);
  AutotuneResults results;
  EXPECT_TRUE(results.ParseFromString(autotune_results_str));
}
// Round trip: serialize to ".txt", clear the cache, load it back.
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto1) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".txt");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  AutotunerUtil::ClearAutotuneResults();
  EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
  TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
  EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
// Same round trip through a ".textproto" path.
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto2) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".textproto");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  AutotunerUtil::ClearAutotuneResults();
  EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
  TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
  EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
// Same round trip through a binary ".pb" path.
TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_Protobuf) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".pb");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  AutotunerUtil::ClearAutotuneResults();
  EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty());
  TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath));
  EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty());
}
// Loading a file whose entries already exist in the cache is an error
// (duplicate keys are rejected rather than silently overwritten).
TEST_F(AutotunerUtilTest, ResultConflictsAreDetected) {
  TF_EXPECT_OK(PopulateResultCache());
  std::string kFilePath = GetUniqueTempFilePath(".pb");
  TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath));
  EXPECT_THAT(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath),
              StatusIs(absl::StatusCode::kInternal,
                       HasSubstr("Duplicate autotuning result")));
}
// When complete AOT results are required and the cache has no entry for the
// instruction, Autotune must fail with kNotFound instead of autotuning.
TEST_F(AutotunerUtilTest, FailIfRequireCompleteAotAutotuning) {
  std::string kFilePath = GetUniqueTempFilePath(".txt");
  auto hlo_module = GetOptimizedModule(kHloText);
  TF_EXPECT_OK(hlo_module.status());
  std::vector<HloComputation*> computations =
      (*hlo_module)
          ->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
  EXPECT_THAT(computations, Not(IsEmpty()));
  const HloInstruction* instruction = *computations[0]->instructions().begin();
  std::unique_ptr<stream_executor::StreamExecutor> executor =
      NewStreamExecutor();
  auto options = DebugOptions();
  options.set_xla_gpu_require_complete_aot_autotune_results(true);
  AutotuneConfig config(DeviceConfig{executor.get()}, options);
  EXPECT_THAT(
      AutotunerUtil::Autotune(instruction, config,
                              [&] { return AutotuneResult(); }),
      StatusIs(
          absl::StatusCode::kNotFound,
          HasSubstr("Complete XLA AOT autotuning results are required, but "
                    "no AOT result was found for key: <key model")));
}
// If a result was cached earlier (first Autotune call without the AOT
// requirement), a later call with the AOT requirement enabled succeeds.
TEST_F(AutotunerUtilTest, OkIfJitAutotuningDisabledButAlreadyLoadedAOT) {
  auto hlo_module = GetOptimizedModule(kHloText);
  std::vector<HloComputation*> computations =
      (*hlo_module)
          ->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>());
  EXPECT_THAT(computations, Not(IsEmpty()));
  const HloInstruction* instruction = *computations[0]->instructions().begin();
  std::unique_ptr<stream_executor::StreamExecutor> executor =
      NewStreamExecutor();
  {
    // Populate the cache via a normal (JIT-allowed) autotune call.
    AutotuneConfig config(DeviceConfig{executor.get()}, DebugOptions());
    TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
                   return AutotuneResult();
                 }).status());
  }
  auto options = DebugOptions();
  options.set_xla_gpu_require_complete_aot_autotune_results(true);
  AutotuneConfig config(DeviceConfig{executor.get()}, options);
  TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] {
                 return AutotuneResult();
               }).status());
}
class FileBasedCacheTest : public AutotunerUtilTest {
public:
static std::string ToString(const proto2::Message& message) {
std::string textproto;
CHECK(tsl::protobuf::TextFormat::PrintToString(message, &textproto));
return textproto;
}
static std::vector<std::string> GetFilesInDir(
const absl::string_view cache_dir) {
std::vector<std::string> files_in_cache;
TF_CHECK_OK(tsl::Env::Default()->GetChildren(std::string(cache_dir),
&files_in_cache));
return files_in_cache;
}
static std::string Read(const absl::string_view filepath) {
std::string file_content;
TF_CHECK_OK(tsl::ReadFileToString(tsl::Env::Default(),
std::string(filepath), &file_content));
return file_content;
}
static void Write(const absl::string_view filepath,
const absl::string_view content) {
TF_CHECK_OK(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(filepath), content));
}
std::unique_ptr<stream_executor::StreamExecutor> executor_ =
NewStreamExecutor();
std::unique_ptr<HloModule> module_ =
ParseAndReturnVerifiedModule(kHloText).value();
const HloInstruction* dot_ = hlo_query::GetFirstInstructionWithOpcode(
*module_->entry_computation(), HloOpcode::kDot);
std::string cache_dir_ = [] {
tsl::Env* default_env = tsl::Env::Default();
std::string cache_dir;
CHECK(default_env->LocalTempFilename(&cache_dir));
CHECK_OK(default_env->CreateDir(cache_dir));
return cache_dir;
}();
AutotuneConfig config_ = AutotuneConfig(DeviceConfig{executor_.get()}, [&] {
DebugOptions options;
options.set_xla_gpu_per_fusion_autotune_cache_dir(cache_dir_);
return options;
}());
AutotuneCacheKey cache_key_ = AutotunerUtil::GetKey(dot_, config_);
std::string cache_filename_ = [&] {
absl::StatusOr<std::string> key_hash =
GetBase64EncodedSha256Hash(cache_key_.ToString());
CHECK_OK(key_hash.status());
return absl::StrCat(key_hash.value(), ".textproto");
}();
std::string cache_file_path_ = tsl::io::JoinPath(cache_dir_, cache_filename_);
const AutotuneResult result1_ = [] {
AutotuneResult result;
result.set_scratch_bytes(1);
return result;
}();
const AutotuneResult result2_ = [] {
AutotuneResult result;
result.set_scratch_bytes(2);
return result;
}();
};
// A fresh autotune run persists its result into the cache directory.
TEST_F(FileBasedCacheTest, AutotuneWritesResultToTheCacheDir) {
  TF_ASSERT_OK_AND_ASSIGN(
      AutotuneResult result,
      AutotunerUtil::Autotune(dot_, config_, [&] { return result1_; }));
  EXPECT_EQ(ToString(result), ToString(result1_));
  ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
  EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
// A pre-written cache file is used instead of invoking the autotune fn.
TEST_F(FileBasedCacheTest, AutotuneReadsResultFromTheCacheDir) {
  Write(cache_file_path_, ToString(result1_));
  bool cache_hit = true;
  TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
                          AutotunerUtil::Autotune(dot_, config_, [&] {
                            cache_hit = false;
                            return result2_;
                          }));
  EXPECT_TRUE(cache_hit);
  EXPECT_EQ(ToString(result), ToString(result1_));
}
// Once a result is in the in-memory cache, further Autotune calls neither
// re-read nor rewrite the cache file (its contents may be clobbered freely).
TEST_F(FileBasedCacheTest,
       RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain) {
  // Asserts that Autotune hits the cache and returns `expected_result`.
  auto check_autotune_cache_hit = [](const HloInstruction* instr,
                                     const AutotuneConfig& config,
                                     const AutotuneResult& expected_result) {
    bool cache_hit = true;
    TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result,
                            AutotunerUtil::Autotune(instr, config, [&] {
                              cache_hit = false;
                              AutotuneResult new_result;
                              new_result.set_scratch_bytes(2);
                              return new_result;
                            }));
    EXPECT_TRUE(cache_hit);
    EXPECT_EQ(ToString(result), ToString(expected_result));
  };
  Write(cache_file_path_, ToString(result1_));
  check_autotune_cache_hit(dot_, config_, result1_);
  constexpr absl::string_view kPlaceholderContent = "placeholder content";
  Write(cache_file_path_, kPlaceholderContent);
  // Still a hit from memory; the placeholder file must remain untouched.
  check_autotune_cache_hit(dot_, config_, result1_);
  EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
// IsInCache consults the file-based cache, not just the in-memory one.
TEST_F(FileBasedCacheTest,
       IsInCacheReturnsTrueIfTheResultIsInTheFileBasedCache) {
  Write(cache_file_path_, ToString(result1_));
  TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
                          AutotunerUtil::IsInCache(cache_key_, config_));
  EXPECT_TRUE(is_in_cache);
}
// With neither cache populated, IsInCache reports a miss.
TEST_F(FileBasedCacheTest, IsInCacheReturnsFalseIfTheResultIsNotInEitherCache) {
  TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache,
                          AutotunerUtil::IsInCache(cache_key_, config_));
  EXPECT_FALSE(is_in_cache);
}
// AddResult writes the result file and reports that it was newly added.
TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) {
  TF_ASSERT_OK_AND_ASSIGN(
      bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
  EXPECT_TRUE(added);
  ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
  EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
// A second AddResult for the same key is a no-op on disk.
TEST_F(FileBasedCacheTest, RepeatedAddResultDoesNotWriteTheFileAgain) {
  {
    TF_ASSERT_OK_AND_ASSIGN(
        bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
    EXPECT_TRUE(added);
  }
  ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
  EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
  constexpr absl::string_view kPlaceholderContent = "placeholder content";
  Write(cache_file_path_, kPlaceholderContent);
  {
    TF_ASSERT_OK_AND_ASSIGN(
        bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
    EXPECT_FALSE(added);
  }
  EXPECT_EQ(Read(cache_file_path_), kPlaceholderContent);
}
}
}
} | absl::Status AddResultToFileBasedCacheIfEnabled(const AutotuneCacheKey& key,
AutotuneResult result,
std::string_view cache_dir)
ABSL_LOCKS_EXCLUDED(autotune_cache_mu) {
if (cache_dir.empty()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(const std::string file_path,
GetCacheFilePath(cache_dir, key));
VLOG(1) << "Writing autotune result to file: " << file_path;
std::string result_str;
if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) {
return absl::InternalError("Failed to serialize autotune result.");
}
std::string temp_file_path = tsl::io::GetTempFilename(".textproto");
tsl::Env* default_env = tsl::Env::Default();
TF_RETURN_IF_ERROR(
tsl::WriteStringToFile(default_env, temp_file_path, result_str));
return default_env->RenameFile(temp_file_path, file_path);
} | TEST_F(FileBasedCacheTest, AutotuneWritesResultToTheCacheDir) {
TF_ASSERT_OK_AND_ASSIGN(
AutotuneResult result,
AutotunerUtil::Autotune(dot_, config_, [&] { return result1_; }));
EXPECT_EQ(ToString(result), ToString(result1_));
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
}
TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) {
TF_ASSERT_OK_AND_ASSIGN(
bool added, AutotunerUtil::AddResult(cache_key_, result1_, config_));
EXPECT_TRUE(added);
ASSERT_THAT(GetFilesInDir(cache_dir_), ElementsAre(cache_filename_));
EXPECT_EQ(Read(cache_file_path_), ToString(result1_));
} |
#include "tensorflow/core/framework/full_type_inference_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace full_type {
// Returns a null inference function. Per its name, a null TypeInferenceFn is
// interpreted by callers as "keep whatever type information already exists"
// (no inference is performed) — confirm against the registration sites.
TypeInferenceFn KeepExisting() { return nullptr; }
// Returns an inference function that ignores its inputs and always yields
// TFT_PRODUCT[TFT_TENSOR[t]].
TypeInferenceFn Tensor(FullTypeId t) {
  return [t](const TypeRefVector& input_types,
             const FunctionTypeInferrer& infer_function_rets) {
    FullTypeDef ret_type;
    ret_type.set_type_id(TFT_PRODUCT);
    FullTypeDef* tensor_t = ret_type.add_args();
    tensor_t->set_type_id(TFT_TENSOR);
    tensor_t->add_args()->set_type_id(t);
    return ret_type;
  };
}
// Returns an inference function that copies the type of input `i` into `n`
// outputs: TFT_PRODUCT[in, in, ...]. An unset input yields an unset result.
TypeInferenceFn ReplicateInput(int i, int n) {
  return [i, n](const TypeRefVector& input_types,
                const FunctionTypeInferrer& infer_function_rets) {
    const FullTypeDef& in_type = input_types.at(i).get();
    FullTypeDef ret_type;
    if (in_type.type_id() == TFT_UNSET) {
      // Nothing known about the input; propagate "unset".
      return ret_type;
    }
    ret_type.set_type_id(TFT_PRODUCT);
    for (int copy = 0; copy < n; ++copy) {
      *ret_type.add_args() = in_type;
    }
    return ret_type;
  };
}
// Returns an inference function that merges all set inputs into a single
// output type. Every set input must be a subtype or supertype of the running
// merge of the inputs before it; the most specific type seen wins. If all
// inputs are unset, the result is unset.
TypeInferenceFn Merge() {
  return [](const TypeRefVector& input_types,
            const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    DCHECK(!input_types.empty());
    FullTypeDef merged;
    for (int i = 0; i < input_types.size(); i++) {
      const auto& t = input_types[i].get();
      // Unset inputs carry no information; skip them.
      if (t.type_id() == TFT_UNSET) {
        continue;
      }
      // `t` refines the running merge: keep the more specific type.
      if (IsSubtype(t, merged)) {
        merged = t;
        continue;
      }
      // `t` is more general than the running merge: nothing to update.
      if (IsSubtype(merged, t)) {
        continue;
      }
      // Neither a subtype nor a supertype: the inputs are incompatible.
      return Status(absl::StatusCode::kInvalidArgument,
                    absl::StrCat("expected compatible input types, but input ",
                                 i, ":\n", t.DebugString(),
                                 " is neither a subtype nor a supertype of the "
                                 "combined inputs preceding it:\n",
                                 merged.DebugString()));
    }
    FullTypeDef ret_type;
    if (merged.type_id() != TFT_UNSET) {
      ret_type.set_type_id(TFT_PRODUCT);
      *(ret_type.add_args()) = merged;
    }
    return ret_type;
  };
}
// Returns an inference function producing TFT_PRODUCT[TFT_ENCODED[in, t]],
// where `in` is the type of input `i` and `t` names the encoding.
// An unset input yields an unset result.
TypeInferenceFn Encode(FullTypeId t, int i) {
  return [t, i](const TypeRefVector& input_types,
                const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    // input_types[i] is dereferenced below, so the size must be strictly
    // greater than i; the previous check (size() >= i) allowed size() == i,
    // which would read out of bounds.
    DCHECK(input_types.size() > i);
    FullTypeDef ret_type;
    const FullTypeDef& in_t = input_types[i].get();
    if (in_t.type_id() == TFT_UNSET) {
      return ret_type;
    }
    ret_type.set_type_id(TFT_PRODUCT);
    auto* enc_type = ret_type.add_args();
    enc_type->set_type_id(TFT_ENCODED);
    // Arg 0 is the payload type; arg 1 is the encoding tag.
    *enc_type->add_args() = in_t;
    enc_type->add_args()->set_type_id(t);
    return ret_type;
  };
}
// Returns an inference function that strips a TFT_ENCODED wrapper with
// encoding `t` from input `i`, yielding TFT_PRODUCT[<payload type>].
// An unset input yields an unset result; a set but mismatched encoding tag
// is an invalid-argument error.
TypeInferenceFn Decode(FullTypeId t, int i) {
  return [t, i](const TypeRefVector& input_types,
                const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    // input_types[i] is dereferenced below; require size() > i (the previous
    // size() >= i check was off by one).
    DCHECK(input_types.size() > i);
    const FullTypeDef& in_t = input_types[i].get();
    // Arg 1 of TFT_ENCODED holds the encoding tag; if set it must match `t`.
    const FullTypeId enc_tid = GetArgDefaultUnset(in_t, 1).type_id();
    if ((enc_tid != TFT_UNSET) && (enc_tid != t)) {
      return Status(absl::StatusCode::kInvalidArgument,
                    absl::StrCat("expected encoded type ", t, " for input ", i,
                                 ", got ", in_t.DebugString()));
    }
    FullTypeDef ret_type;
    // Arg 0 of TFT_ENCODED holds the decoded (payload) type.
    const FullTypeDef& out_t = GetArgDefaultUnset(in_t, 0);
    if (in_t.type_id() == TFT_UNSET) {
      return ret_type;
    }
    ret_type.set_type_id(TFT_PRODUCT);
    *ret_type.add_args() = out_t;
    return ret_type;
  };
}
// Returns an inference function producing TFT_PRODUCT[t[<element>]], where
// the element type is copied verbatim from input `element_idx`.
TypeInferenceFn UnaryContainerCreate(FullTypeId t, int element_idx) {
  return [t, element_idx](const TypeRefVector& input_types,
                          const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    // input_types[element_idx] is dereferenced below; require a strictly
    // larger size (the previous size() >= element_idx check was off by one).
    DCHECK(input_types.size() > element_idx);
    FullTypeDef ret_type;
    ret_type.set_type_id(TFT_PRODUCT);
    FullTypeDef* arg_t = ret_type.add_args();
    arg_t->set_type_id(t);
    *(arg_t->add_args()) = input_types[element_idx].get();
    return ret_type;
  };
}
// Returns an inference function for ops that add one element (input
// `element_idx`) to a container of kind `t` (input `container_idx`),
// yielding TFT_PRODUCT[t[<element>]]. If `homogeneous`, the new element must
// be a subtype of the container's existing element type; adding a
// conflicting element to a heterogeneous container is currently
// unimplemented (union types would be needed).
TypeInferenceFn UnaryContainerAdd(FullTypeId t, int container_idx,
                                  int element_idx, bool homogeneous) {
  return [t, container_idx, element_idx, homogeneous](
             const TypeRefVector& input_types,
             const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    // Both indices are dereferenced below; require a strictly larger size
    // (the previous size() >= idx checks allowed size() == idx, an
    // out-of-bounds read).
    DCHECK(input_types.size() > container_idx);
    DCHECK(input_types.size() > element_idx);
    FullTypeDef ret_type;
    ret_type.set_type_id(TFT_PRODUCT);
    FullTypeDef* cont_t = ret_type.add_args();
    cont_t->set_type_id(t);
    const FullTypeDef& in_cont_t = input_types[container_idx].get();
    const FullTypeDef& in_el_t = input_types[element_idx].get();
    if (in_cont_t.type_id() != TFT_UNSET) {
      if (in_cont_t.type_id() != t) {
        return Status(
            absl::StatusCode::kInvalidArgument,
            absl::StrCat("expected container type ", t, " for input ",
                         container_idx, ", got ", in_cont_t.DebugString()));
      }
      // Carry over the container's existing parametrization.
      *cont_t = in_cont_t;
    }
    VLOG(1) << "ContainerAddUnary: " << cont_t->DebugString() << ", "
            << in_el_t.DebugString() << ", " << container_idx << "; "
            << element_idx;
    for (const auto& tmp : input_types) {
      VLOG(1) << " input: " << tmp.get().DebugString();
    }
    // No information about the new element: the container stays as-is.
    if (in_el_t.type_id() == TFT_UNSET) {
      return ret_type;
    }
    const FullTypeDef& el_t = GetArgDefaultUnset(*cont_t, 0);
    // Container had no element type yet: adopt the new element's type.
    if (el_t.type_id() == TFT_UNSET) {
      cont_t->clear_args();
      *(cont_t->add_args()) = in_el_t;
      return ret_type;
    }
    // New element already fits the existing element type.
    if (IsSubtype(in_el_t, el_t)) {
      return ret_type;
    }
    if (homogeneous) {
      return Status(absl::StatusCode::kInvalidArgument,
                    absl::StrCat("expected a subtype of ", el_t.DebugString(),
                                 " for input ", element_idx,
                                 " of a homogeneous container ", t, ", got ",
                                 in_el_t.DebugString()));
    } else {
      return Status(
          absl::StatusCode::kUnimplemented,
          absl::StrCat("need union types for heterogeneous containers.\n"
                       "A homogeneous container would expect a subtype of ",
                       el_t.DebugString(), " for input ", element_idx,
                       ", but got ", in_el_t.DebugString()));
    }
  };
}
// Returns an inference function that applies `unstack` to every input and
// packs the results into TFT_PRODUCT[t[TFT_PRODUCT[unstack(in_0), ...]]].
TypeInferenceFn MultiaryUnstack(
    FullTypeId t, std::function<FullTypeDef(const FullTypeDef&)> unstack) {
  return [t, unstack](const TypeRefVector& input_types,
                      const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    FullTypeDef ret_type;
    ret_type.set_type_id(TFT_PRODUCT);
    FullTypeDef* container = ret_type.add_args();
    container->set_type_id(t);
    FullTypeDef* elements = container->add_args();
    elements->set_type_id(TFT_PRODUCT);
    // One unstacked element type per input, in input order.
    for (const auto& in_ref : input_types) {
      *elements->add_args() = unstack(in_ref.get());
    }
    return ret_type;
  };
}
// "Unstacks" a single tensor type: currently the identity on its argument.
// Only TFT_TENSOR, TFT_RAGGED or TFT_UNSET inputs with at most one type
// argument are expected (debug-checked).
FullTypeDef UnstackTensor(const FullTypeDef& t) {
  DCHECK((t.type_id() == TFT_TENSOR) || (t.type_id() == TFT_RAGGED) ||
         (t.type_id() == TFT_UNSET));
  DCHECK_LE(t.args_size(), 1);
  return t;
}
// Returns an inference function that applies `map` element-wise to the
// TFT_PRODUCT element type of a container of kind `t` found at input
// `input_idx`, producing a container of the same kind with mapped elements.
// Unset input or unparametrized container propagates without mapping.
TypeInferenceFn ContainerMap(
    FullTypeId t, int input_idx,
    std::function<FullTypeDef(const FullTypeDef&)> map) {
  return [t, input_idx, map](const TypeRefVector& input_types,
                             const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    // input_types.at(input_idx) is read below, so the size must be strictly
    // greater than input_idx (the previous DCHECK_GE was off by one).
    DCHECK_GT(input_types.size(), input_idx);
    const FullTypeDef& in_cont_t = input_types.at(input_idx).get();
    FullTypeDef ret_type;
    if (in_cont_t.type_id() == TFT_UNSET) {
      return ret_type;
    }
    if (in_cont_t.type_id() != t) {
      return Status(absl::StatusCode::kInvalidArgument,
                    absl::StrCat("expected type ", t, " for input ", input_idx,
                                 ", got ", in_cont_t.DebugString()));
    }
    ret_type.set_type_id(TFT_PRODUCT);
    FullTypeDef* out_cont_t = ret_type.add_args();
    out_cont_t->set_type_id(t);
    const FullTypeDef& in_el_t = GetArgDefaultUnset(in_cont_t, 0);
    // Unparametrized container: return an unparametrized container too.
    if (in_el_t.type_id() == TFT_UNSET) {
      return ret_type;
    }
    if (in_el_t.type_id() != TFT_PRODUCT) {
      return Status(absl::StatusCode::kInvalidArgument,
                    absl::StrCat("expected PRODUCT element type for input ",
                                 input_idx, ", got ", in_el_t.DebugString()));
    }
    FullTypeDef* out_el_t = out_cont_t->add_args();
    out_el_t->set_type_id(TFT_PRODUCT);
    for (int k = 0; k < in_el_t.args_size(); k++) {
      *(out_el_t->add_args()) = map(in_el_t.args(k));
    }
    return ret_type;
  };
}
// Returns an inference function that covariantly maps a type of kind `t` at
// input `input_idx` to kind `u`, preserving its type arguments:
// t[args...] -> TFT_PRODUCT[u[args...]]. Unset input propagates as unset.
TypeInferenceFn MapCovariant(FullTypeId t, FullTypeId u, int input_idx) {
  return
      [t, u, input_idx](const TypeRefVector& input_types,
                        const FunctionTypeInferrer& infer_function_rets)
          -> absl::StatusOr<FullTypeDef> {
        // input_types.at(input_idx) is read below; require a strictly larger
        // size (the previous DCHECK_GE was off by one).
        DCHECK_GT(input_types.size(), input_idx);
        const FullTypeDef& in_t = input_types.at(input_idx).get();
        FullTypeDef ret_type;
        if (in_t.type_id() == TFT_UNSET) {
          return ret_type;
        }
        if (in_t.type_id() != t) {
          return Status(absl::StatusCode::kInvalidArgument,
                        absl::StrCat("expected type ", t, " for input ",
                                     input_idx, ", got ", in_t.DebugString()));
        }
        ret_type.set_type_id(TFT_PRODUCT);
        // Renamed from `t`, which shadowed the captured FullTypeId `t`.
        FullTypeDef* out_t = ret_type.add_args();
        out_t->set_type_id(u);
        *out_t->mutable_args() = in_t.args();
        return ret_type;
      };
}
// Returns an inference function that delegates to the return-type inference
// of the function named by node attribute `func_attr_name`, forwarding the
// input types unchanged.
TypeInferenceFn FunctionCall(const string& func_attr_name) {
  return [func_attr_name](const TypeRefVector& input_types,
                          const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    return infer_function_rets(func_attr_name, input_types);
  };
}
// Returns an inference function that runs every function in `func_list` and
// concatenates their TFT_PRODUCT results into one TFT_PRODUCT. If any
// constituent function yields an unset type, the whole result is unset.
TypeInferenceFn Tuple(const std::vector<TypeInferenceFn>& func_list) {
  return [func_list](const TypeRefVector& input_types,
                     const FunctionTypeInferrer& infer_function_rets)
      -> absl::StatusOr<FullTypeDef> {
    FullTypeDef ret_type;
    ret_type.set_type_id(TFT_PRODUCT);
    for (const auto& func : func_list) {
      const auto& status_or_t = func(input_types, infer_function_rets);
      // Note: args_size() doubles as the index of the current function,
      // because each prior function appended its products. (Error-context
      // typo "infernce" fixed to "inference".)
      TF_RETURN_WITH_CONTEXT_IF_ERROR(
          status_or_t.status(),
          absl::StrCat("for Tuple type inference function ",
                       ret_type.args_size()));
      const FullTypeDef& t = status_or_t.value();
      if (t.type_id() == TFT_UNSET) {
        VLOG(1) << "For Tuple type inference function, function "
                << ret_type.args_size() << " is unset.";
        FullTypeDef unset_type;
        return unset_type;
      }
      if (t.type_id() != TFT_PRODUCT) {
        return Status(
            absl::StatusCode::kInvalidArgument,
            absl::StrCat("for Tuple type inference function, expected result "
                         "of type inference function ",
                         ret_type.args_size(),
                         " to start with TFT_PRODUCT not ", t.DebugString()));
      }
      // Flatten this function's products into the combined product.
      for (int i = 0; i < t.args_size(); i++) {
        *(ret_type.add_args()) = t.args(i);
      }
    }
    return ret_type;
  };
}
// Type transformation applied when batching a tensor: currently the
// identity (batching is not modeled as a type change).
FullTypeDef BatchTensor(const FullTypeDef& t) {
  return t;
}
// Type transformation applied when sharding a tensor: currently the
// identity (sharding is not modeled as a type change).
FullTypeDef ShardTensor(const FullTypeDef& t) {
  return t;
}
}
} | #include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace full_type {
namespace {
// Default args replicate input 0 once into a PRODUCT.
TEST(ReplicateInput, Default) {
  FullTypeDef t;
  t.set_type_id(TFT_ARRAY);
  const auto ret = ReplicateInput()({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
// n=2 duplicates the single input.
TEST(ReplicateInput, Duplicate) {
  FullTypeDef t;
  t.set_type_id(TFT_ARRAY);
  const auto ret = ReplicateInput(0, 2)({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 2);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  EXPECT_EQ(rt.args(1).type_id(), TFT_ARRAY);
}
// Index selects which input gets replicated: here input 0 of two.
TEST(ReplicateInput, FirstOfMultipleArgs) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  const auto ret = ReplicateInput(0, 2)({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 2);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  EXPECT_EQ(rt.args(1).type_id(), TFT_ARRAY);
}
// Same, selecting input 1.
TEST(ReplicateInput, SecondOfMultipleArgs) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  const auto ret = ReplicateInput(1, 2)({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 2);
  EXPECT_EQ(rt.args(0).type_id(), TFT_TENSOR);
  EXPECT_EQ(rt.args(1).type_id(), TFT_TENSOR);
}
// Unset input replicates to an unset result (no PRODUCT wrapper).
TEST(ReplicateInput, Unset) {
  FullTypeDef t;
  const auto ret = ReplicateInput()({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
// Merging a single set input yields PRODUCT[that type].
TEST(Merge, Single) {
  FullTypeDef t;
  t.set_type_id(TFT_ARRAY);
  const auto ret = Merge()({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
// Two identical inputs merge to one.
TEST(Merge, Double) {
  FullTypeDef t;
  t.set_type_id(TFT_ARRAY);
  const auto ret = Merge()({t, t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
// An unset input merges to an unset result.
TEST(Merge, Unset) {
  FullTypeDef t;
  t.set_type_id(TFT_UNSET);
  const auto ret = Merge()({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
// All-unset inputs also merge to unset.
TEST(Merge, UnsetComponents) {
  FullTypeDef t1;
  FullTypeDef t2;
  const auto ret = Merge()({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
// Shared assertion: `ret` must be PRODUCT[ARRAY[TENSOR]].
void ExpectInferredArrayOfTensor(absl::StatusOr<FullTypeDef> ret) {
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
// Incompatible inputs (neither a subtype of the other) are an error.
TEST(Merge, RejectsMismatched) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  const auto ret = Merge()({t1, t2}, {});
  EXPECT_THAT(ret.status().message(),
              ::testing::HasSubstr("expected compatible input types"));
}
// An unset input does not discard the other input's information, in
// either argument order.
TEST(Merge, UsesPartialInfo) {
  FullTypeDef t1;
  FullTypeDef t2;
  t2.set_type_id(TFT_ARRAY);
  t2.add_args()->set_type_id(TFT_TENSOR);
  ExpectInferredArrayOfTensor(Merge()({t1, t2}, {}));
  ExpectInferredArrayOfTensor(Merge()({t2, t1}, {}));
}
// Of two subtype-related inputs, the more specific one wins.
TEST(Merge, SelectsMostSpecificOfSubtypes) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_ANY);
  FullTypeDef t2;
  t2.set_type_id(TFT_ARRAY);
  t2.add_args()->set_type_id(TFT_TENSOR);
  ExpectInferredArrayOfTensor(Merge()({t1, t2}, {}));
  ExpectInferredArrayOfTensor(Merge()({t2, t1}, {}));
}
// The container is created around the type at element_idx=1 only.
TEST(UnaryContainerCreate, Basic) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ANY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  FullTypeDef t3;
  t3.set_type_id(TFT_ANY);
  const auto ret = UnaryContainerCreate(TFT_ARRAY, 1)({t1, t2, t3}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
// Adding element (idx 1) to an empty container (idx 2) parametrizes it.
TEST(UnaryContainerAdd, Basic) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ANY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  FullTypeDef t3;
  t3.set_type_id(TFT_ARRAY);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 2, 1,
                        false)({t1, t2, t3}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
// A container input of the wrong kind is rejected.
TEST(UnaryContainerAdd, RejectsMismatchedContainerType) {
  FullTypeDef t1;
  t1.set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  t2.set_type_id(TFT_DATASET);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 1, 0,
                        false)({t1, t2}, {});
  EXPECT_THAT(ret.status().message(),
              ::testing::HasSubstr("expected container type"));
}
// An unset container input is tolerated; the element still lands.
TEST(UnaryContainerAdd, IgnoresUnsetContainerType) {
  FullTypeDef t1;
  t1.set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 1, 0,
                        false)({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
// An unset element leaves the container unparametrized.
TEST(UnaryContainerAdd, UnsetElementTypeRemainsUnset) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ANY);
  FullTypeDef t2;
  FullTypeDef t3;
  t3.set_type_id(TFT_ARRAY);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 2, 1,
                        false)({t1, t2, t3}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 0);
}
// An unset element does not erase the container's existing element type.
TEST(UnaryContainerAdd, UnsetElementTypeKeepsOriginalElementType) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        false)({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
// Adding a subtype element keeps the container's broader element type.
TEST(UnaryContainerAdd, KeepsContainerTypeIfElementIsSubtype) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_ANY);
  FullTypeDef t2;
  t2.set_type_id(TFT_TENSOR);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        true)({t1, t2}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_ANY);
}
// Heterogeneous containers with conflicting elements are unimplemented.
TEST(UnaryContainerAdd, RejectsMismatchedElementTypesHeterogenous) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  t2.set_type_id(TFT_DATASET);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        false)({t1, t2}, {});
  EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("need union types"));
}
// Homogeneous containers reject a conflicting element with invalid-argument.
TEST(UnaryContainerAdd, RejectsMismatchedElementTypesHomogeneous) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  t2.set_type_id(TFT_DATASET);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        true)({t1, t2}, {});
  EXPECT_THAT(ret.status().message(),
              ::testing::HasSubstr("expected a subtype"));
}
// A supertype element (ANY over TENSOR) is also rejected (heterogeneous).
TEST(UnaryContainerAdd, RejectsSupertypeElementTypeHeterogeneous) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  t2.set_type_id(TFT_ANY);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        false)({t1, t2}, {});
  EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("need union types"));
}
// A supertype element is also rejected for homogeneous containers.
TEST(UnaryContainerAdd, RejectsSupertypeElementTypeHomogeneous) {
  FullTypeDef t1;
  t1.set_type_id(TFT_ARRAY);
  t1.add_args()->set_type_id(TFT_TENSOR);
  FullTypeDef t2;
  t2.set_type_id(TFT_ANY);
  const auto ret =
      UnaryContainerAdd(TFT_ARRAY, 0, 1,
                        true)({t1, t2}, {});
  EXPECT_THAT(ret.status().message(),
              ::testing::HasSubstr("expected a subtype"));
}
// MultiaryUnstack with one TENSOR input should yield
// PRODUCT[DATASET[PRODUCT[TENSOR]]]: the unstacked element type is wrapped
// in a product inside the dataset container.
TEST(MultiaryUnstack, Basic) {
  FullTypeDef t1;
  t1.set_type_id(TFT_TENSOR);
  const auto ret = MultiaryUnstack(TFT_DATASET, UnstackTensor)({t1}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  // Walk the nesting: PRODUCT -> DATASET -> PRODUCT -> TENSOR.
  EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args_size(), 1);
  EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
  ASSERT_EQ(rt.args(0).args_size(), 1);
  ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
  ASSERT_EQ(rt.args(0).args(0).args_size(), 1);
  ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(MultiaryUnstack, Ternary) {
FullTypeDef t1;
t1.set_type_id(TFT_RAGGED);
t1.add_args()->set_type_id(TFT_STRING);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
FullTypeDef t3;
t3.set_type_id(TFT_RAGGED);
t3.add_args()->set_type_id(TFT_INT64);
const auto ret =
MultiaryUnstack(TFT_DATASET, UnstackTensor)({t1, t2, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 3);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).args(0).type_id(), TFT_STRING);
ASSERT_EQ(rt.args(0).args(0).args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args(0).args(2).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(2).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(2).args(0).type_id(), TFT_INT64);
}
TEST(MapContainer, Basic) {
FullTypeDef cont_t;
cont_t.set_type_id(TFT_DATASET);
FullTypeDef* el_t = cont_t.add_args();
el_t->set_type_id(TFT_PRODUCT);
(el_t->add_args())->set_type_id(TFT_TENSOR);
const auto ret = ContainerMap(TFT_DATASET, 0, BatchTensor)({cont_t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(MapContainer, Ternary) {
FullTypeDef t1;
t1.set_type_id(TFT_ANY);
FullTypeDef cont_t;
cont_t.set_type_id(TFT_DATASET);
FullTypeDef* el_t = cont_t.add_args();
el_t->set_type_id(TFT_PRODUCT);
FullTypeDef* e1 = el_t->add_args();
e1->set_type_id(TFT_RAGGED);
e1->add_args()->set_type_id(TFT_STRING);
FullTypeDef* e2 = el_t->add_args();
e2->set_type_id(TFT_TENSOR);
FullTypeDef* e3 = el_t->add_args();
e3->set_type_id(TFT_RAGGED);
e3->add_args()->set_type_id(TFT_INT64);
FullTypeDef t3;
t3.set_type_id(TFT_ANY);
const auto ret =
ContainerMap(TFT_DATASET, 1, BatchTensor)({t1, cont_t, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 3);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).args(0).type_id(), TFT_STRING);
ASSERT_EQ(rt.args(0).args(0).args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args(0).args(2).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(2).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(2).args(0).type_id(), TFT_INT64);
}
TEST(MapCovariant, Basic) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
ASSERT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(rt.args(0).args(0).args_size(), 0);
}
// An unset (TFT_UNSET) input must pass through MapCovariant untouched:
// the result is unset with no args, not an error.
TEST(MapCovariant, IgnoresUnset) {
  FullTypeDef t;
  t.set_type_id(TFT_UNSET);
  const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
  TF_EXPECT_OK(ret.status());
  const FullTypeDef& rt = ret.value();
  EXPECT_EQ(rt.type_id(), TFT_UNSET);
  ASSERT_EQ(rt.args_size(), 0);
}
TEST(MapCovariant, RejectsMismatchedType) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_ARRAY, TFT_DATASET, 0)({t}, {});
EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("expected type"));
}
// Builds the tuple inference function used by the Tuple tests: the first
// component replicates the input type, the second is a fixed int32 tensor.
static TypeInferenceFn tuple_func() {
  return Tuple(
      std::vector<TypeInferenceFn>{ReplicateInput(), Tensor(TFT_INT32)});
}
TEST(Tuple, Basic) {
const TypeInferenceFn ret_func = tuple_func();
FullTypeDef t_in;
t_in.set_type_id(TFT_TENSOR);
t_in.add_args()->set_type_id(TFT_FLOAT);
const auto ret = ret_func({t_in}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 2);
EXPECT_EQ(rt.args(0).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(rt.args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(1).args_size(), 1);
EXPECT_EQ(rt.args(1).args(0).type_id(), TFT_INT32);
}
TEST(Tuple, Unset) {
const TypeInferenceFn ret_func = tuple_func();
FullTypeDef t_in;
const auto ret = ret_func({t_in}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
ASSERT_EQ(rt.args_size(), 0);
}
}
}
} | TypeInferenceFn MapCovariant(FullTypeId t, FullTypeId u, int input_idx) {
return
[t, u, input_idx](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
DCHECK_GE(input_types.size(), input_idx);
const FullTypeDef& in_t = input_types.at(input_idx).get();
FullTypeDef ret_type;
if (in_t.type_id() == TFT_UNSET) {
return ret_type;
}
if (in_t.type_id() != t) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected type ", t, " for input ",
input_idx, ", got ", in_t.DebugString()));
}
ret_type.set_type_id(TFT_PRODUCT);
FullTypeDef* t = ret_type.add_args();
t->set_type_id(u);
*t->mutable_args() = in_t.args();
return ret_type;
};
} | TEST(MapCovariant, Basic) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
ASSERT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(rt.args(0).args(0).args_size(), 0);
}
TEST(MapCovariant, IgnoresUnset) {
FullTypeDef t;
t.set_type_id(TFT_UNSET);
const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
ASSERT_EQ(rt.args_size(), 0);
}
TEST(MapCovariant, RejectsMismatchedType) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_ARRAY, TFT_DATASET, 0)({t}, {});
EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("expected type"));
} |
#include "xla/client/lib/constants.h"
#include <limits>
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
// Returns a scalar constant equal to zero of the given element type.
XlaOp Zero(XlaBuilder* builder, PrimitiveType type) {
  Literal zero_literal = LiteralUtil::Zero(type);
  return ConstantLiteral(builder, zero_literal);
}
// Returns a zero-filled value of `shape` by broadcasting a scalar zero of
// the shape's element type across all of its dimensions.
XlaOp Zeros(XlaBuilder* builder, const Shape& shape) {
  XlaOp scalar_zero = Zero(builder, shape.element_type());
  return Broadcast(scalar_zero, shape.dimensions());
}
// Returns a zero-filled value with the same shape as `prototype`.
// Shape-query failures are reported on the prototype's builder.
XlaOp ZerosLike(XlaOp prototype) {
  XlaBuilder* builder = prototype.builder();
  auto build_zeros = [&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(prototype));
    return Zeros(builder, shape);
  };
  return builder->ReportErrorOrReturn(build_zeros);
}
// Returns a scalar constant equal to one of the given element type.
XlaOp One(XlaBuilder* builder, PrimitiveType type) {
  Literal one_literal = LiteralUtil::One(type);
  return ConstantLiteral(builder, one_literal);
}
// Returns the machine epsilon of `type` (the gap between 1.0 and the next
// representable value). Non-floating-point types report an error on the
// builder instead of producing a value.
XlaOp Epsilon(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      // Invoked with a compile-time PrimitiveType constant; the constexpr
      // branch below is only instantiated for floating-point types.
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::epsilon());
        }
        return builder->ReportError(InvalidArgument(
            "Invalid type for Epsilon (%s).", PrimitiveType_Name(type)));
      },
      type);
}
// Returns the minimum representable value of `type` (per LiteralUtil;
// -infinity for floating-point types, lowest finite value for integers).
XlaOp MinValue(XlaBuilder* builder, PrimitiveType type) {
  Literal min_literal = LiteralUtil::MinValue(type);
  return ConstantLiteral(builder, min_literal);
}
// Returns the minimum *finite* value of `type`: numeric_limits::lowest() for
// floating-point types, and MinValue for everything else (where min is
// already finite).
XlaOp MinFiniteValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          // lowest() excludes -infinity, unlike MinValue for floats.
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::lowest());
        }
        return MinValue(builder, type);
      },
      type);
}
// Returns the smallest positive *normal* (non-denormal) value of `type`,
// i.e. numeric_limits::min() for floating-point types. Non-floating-point
// types report an error on the builder.
XlaOp MinPositiveNormalValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          // For floats, numeric_limits::min() is the smallest positive
          // normal value (not the most negative value).
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::min());
        }
        return builder->ReportError(
            InvalidArgument("Invalid type for MinPositiveNormalValue (%s).",
                            PrimitiveType_Name(type)));
      },
      type);
}
// Returns the maximum representable value of `type` (per LiteralUtil;
// +infinity for floating-point types, largest finite value for integers).
XlaOp MaxValue(XlaBuilder* builder, PrimitiveType type) {
  Literal max_literal = LiteralUtil::MaxValue(type);
  return ConstantLiteral(builder, max_literal);
}
// Returns the maximum *finite* value of `type`: numeric_limits::max() for
// floating-point types, and MaxValue for everything else (where max is
// already finite).
XlaOp MaxFiniteValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          // max() excludes +infinity, unlike MaxValue for floats.
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::max());
        }
        return MaxValue(builder, type);
      },
      type);
}
// Returns a quiet NaN of `type`. Non-floating-point types (which have no
// NaN representation) report an error on the builder.
XlaOp NanValue(XlaBuilder* builder, PrimitiveType type) {
  return primitive_util::PrimitiveTypeSwitch<XlaOp>(
      [&](auto primitive_type_constant) -> XlaOp {
        if constexpr (primitive_util::IsFloatingPointType(
                          primitive_type_constant)) {
          using NativeT = typename primitive_util::PrimitiveTypeToNative<
              primitive_type_constant>::type;
          return ConstantR0<NativeT>(builder,
                                     std::numeric_limits<NativeT>::quiet_NaN());
        }
        return builder->ReportError(InvalidArgument(
            "Invalid type for NanValue (%s).", PrimitiveType_Name(type)));
      },
      type);
}
} | #include "xla/client/lib/constants.h"
#include <limits>
#include "xla/client/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ConstantsTest = ClientLibraryTestBase;
using ::testing::HasSubstr;
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4);
ComputeAndCompareR0<int32_t>(&builder, 4, {});
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeS32DoesNotAcceptFloats) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::S32, 4.5);
auto statusor = builder.Build();
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().message(), HasSubstr("Invalid cast"));
}
XLA_TEST_F(ConstantsTest, ConstantR0WithTypeF32) {
XlaBuilder builder(TestName());
ConstantR0WithType(&builder, xla::F32, -7);
ComputeAndCompareR0<float>(&builder, -7, {});
ConstantR0WithType(&builder, xla::F32, 0.5);
ComputeAndCompareR0<float>(&builder, 0.5, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeS32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<int32_t>(&builder, 42), -3);
ComputeAndCompareR0<int32_t>(&builder, -3, {});
}
XLA_TEST_F(ConstantsTest, ScalarLikeF32) {
XlaBuilder builder(TestName());
ScalarLike(ConstantR0<float>(&builder, 42.75), -3.2);
ComputeAndCompareR0<float>(&builder, -3.2, {});
}
XLA_TEST_F(ConstantsTest, ZeroS32) {
XlaBuilder builder(TestName());
Zero(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 0, {});
}
XLA_TEST_F(ConstantsTest, ZeroF32) {
XlaBuilder builder(TestName());
Zero(&builder, F32);
ComputeAndCompareR0<float>(&builder, 0.0, {});
}
XLA_TEST_F(ConstantsTest, ZerosS32) {
XlaBuilder builder(TestName());
Zeros(&builder, ShapeUtil::MakeShape(S32, {2, 2}));
ComputeAndCompareR2<int32_t>(&builder, {{0, 0}, {0, 0}}, {});
}
XLA_TEST_F(ConstantsTest, ZerosLikeF32) {
XlaBuilder builder(TestName());
ZerosLike(ConstantR1<float>(&builder, {1., 2., 3.}));
ComputeAndCompareR1<float>(&builder, {0., 0., 0.}, {});
}
XLA_TEST_F(ConstantsTest, OneS32) {
XlaBuilder builder(TestName());
One(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 1, {});
}
XLA_TEST_F(ConstantsTest, OneF32) {
XlaBuilder builder(TestName());
One(&builder, F32);
ComputeAndCompareR0<float>(&builder, 1., {});
}
XLA_TEST_F(ConstantsTest, EpsilonF32) {
XlaBuilder builder(TestName());
Epsilon(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::epsilon(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueS32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueS32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinFiniteValueF32) {
XlaBuilder builder(TestName());
MinFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MaxFiniteValueF32) {
XlaBuilder builder(TestName());
MaxFiniteValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::max(), {});
}
XLA_TEST_F(ConstantsTest, MinValueS32) {
XlaBuilder builder(TestName());
MinValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::min(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueS32) {
XlaBuilder builder(TestName());
MaxValue(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, std::numeric_limits<int32_t>::max(),
{});
}
XLA_TEST_F(ConstantsTest, MinValueF32) {
XlaBuilder builder(TestName());
MinValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, -std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, MaxValueF32) {
XlaBuilder builder(TestName());
MaxValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::infinity(),
{});
}
XLA_TEST_F(ConstantsTest, NanValueF32) {
XlaBuilder builder(TestName());
NanValue(&builder, F32);
ComputeAndCompareR0<float>(&builder, std::numeric_limits<float>::quiet_NaN(),
{});
}
}
} | #include "xla/client/lib/constants.h"
#include <limits>
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
XlaOp Zero(XlaBuilder* builder, PrimitiveType type) {
return ConstantLiteral(builder, LiteralUtil::Zero(type));
} | XLA_TEST_F(ConstantsTest, ZeroS32) {
XlaBuilder builder(TestName());
Zero(&builder, S32);
ComputeAndCompareR0<int32_t>(&builder, 0, {});
}
XLA_TEST_F(ConstantsTest, ZeroF32) {
XlaBuilder builder(TestName());
Zero(&builder, F32);
ComputeAndCompareR0<float>(&builder, 0.0, {});
} |
#include "tensorflow/lite/core/c/c_api_opaque.h"
#include <stdarg.h>
#include <stdint.h>
#include <cstdio>
#include <vector>
#include "tensorflow/lite/c/c_api_opaque_internal.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/util.h"
namespace {
// Conversion helpers between the public opaque API types and the core TFLite
// types. The casts rely on the opaque types being pointer-interconvertible
// views of the core types — TODO confirm against the opaque-type declarations.

// Opaque tensor <-> core tensor (const and mutable overloads).
const TfLiteTensor* Convert(const TfLiteOpaqueTensor* opaque_tensor) {
  return reinterpret_cast<const TfLiteTensor*>(opaque_tensor);
}
TfLiteTensor* Convert(TfLiteOpaqueTensor* opaque_tensor) {
  return reinterpret_cast<TfLiteTensor*>(opaque_tensor);
}
// Opaque node <-> core node.
TfLiteNode* Convert(TfLiteOpaqueNode* opaque_node) {
  return reinterpret_cast<TfLiteNode*>(opaque_node);
}
const TfLiteNode* Convert(const TfLiteOpaqueNode* opaque_node) {
  return reinterpret_cast<const TfLiteNode*>(opaque_node);
}
// Opaque context <-> core context (both directions).
const TfLiteContext* Convert(const TfLiteOpaqueContext* opaque_context) {
  return reinterpret_cast<const TfLiteContext*>(opaque_context);
}
TfLiteContext* Convert(TfLiteOpaqueContext* opaque_context) {
  return reinterpret_cast<TfLiteContext*>(opaque_context);
}
TfLiteOpaqueContext* Convert(TfLiteContext* tflite_context) {
  return reinterpret_cast<TfLiteOpaqueContext*>(tflite_context);
}
// The context's impl_ field is interpreted as the owning Subgraph.
const ::tflite::Subgraph* GetSubgraph(
    const TfLiteOpaqueContext* opaque_context) {
  return reinterpret_cast<const ::tflite::Subgraph*>(
      Convert(opaque_context)->impl_);
}
::tflite::Subgraph* GetSubgraph(TfLiteOpaqueContext* opaque_context) {
  return reinterpret_cast<::tflite::Subgraph*>(Convert(opaque_context)->impl_);
}
}  // namespace
// Description of a tensor to be created via TfLiteOpaqueContextAddTensor.
// Populated through the TfLiteOpaqueTensorBuilderSet* functions below.
struct TfLiteOpaqueTensorBuilder {
  TfLiteType type;                    // Element type of the new tensor.
  void* data;                         // Backing buffer; required iff dynamic.
  TfLiteAllocationType allocation_type;  // How the tensor memory is managed.
  TfLiteQuantizationParams quantization_params;  // Per-tensor scale/zero-point.
  TfLiteQuantization quantization;    // Full quantization description.
};
TfLiteType TfLiteOpaqueTensorType(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorType(reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
int32_t TfLiteOpaqueTensorNumDims(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorNumDims(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
int32_t TfLiteOpaqueTensorDim(const TfLiteOpaqueTensor* opaque_tensor,
int32_t dim_index) {
return TfLiteTensorDim(reinterpret_cast<const TfLiteTensor*>(opaque_tensor),
dim_index);
}
// Writes the number of dimensions in the tensor's dimension signature into
// `*num_dims`, or -1 when the tensor carries no signature. Always succeeds.
TfLiteStatus TfLiteOpaqueTensorGetNumDimsSignature(
    const TfLiteOpaqueTensor* opaque_tensor, int32_t* num_dims) {
  const TfLiteTensor* tensor = Convert(opaque_tensor);
  *num_dims = tensor->dims_signature ? tensor->dims_signature->size : -1;
  return kTfLiteOk;
}
// Writes the length of dimension `dim_index` into `*dim_length`, preferring
// the dimension signature (which may hold -1 for dynamic dims) and falling
// back to the concrete shape when the signature is absent or empty.
// NOTE(review): `dim_index` is not bounds-checked here — presumably callers
// validate it against TfLiteOpaqueTensorGetNumDimsSignature; confirm.
TfLiteStatus TfLiteOpaqueTensorGetDimSignature(
    const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index,
    int32_t* dim_length) {
  const TfLiteTensor* tensor = Convert(opaque_tensor);
  // A zero-length signature is treated the same as a missing one.
  if (tensor->dims_signature != nullptr && tensor->dims_signature->size != 0) {
    *dim_length = tensor->dims_signature->data[dim_index];
  } else {
    *dim_length = tensor->dims->data[dim_index];
  }
  return kTfLiteOk;
}
int TfLiteOpaqueTensorIsVariable(const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->is_variable ? 1 : 0;
}
size_t TfLiteOpaqueTensorByteSize(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorByteSize(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
// Returns the tensor's data pointer, or nullptr when the tensor itself is
// null (lets callers chain lookups without an explicit null check).
void* TfLiteOpaqueTensorData(const TfLiteOpaqueTensor* opaque_tensor) {
  if (opaque_tensor == nullptr) {
    return nullptr;
  }
  return TfLiteTensorData(
      reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
TfLiteAllocationType TfLiteOpaqueTensorGetAllocationType(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->allocation_type;
}
TfLiteAllocationStrategy TfLiteOpaqueTensorGetAllocationStrategy(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetAllocationStrategy(Convert(t));
}
TfLiteRunStability TfLiteOpaqueTensorGetBufferAddressStability(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetBufferAddressStability(Convert(t));
}
TfLiteRunStability TfLiteOpaqueTensorGetDataStability(
const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetDataStability(Convert(t));
}
TfLiteRunStep TfLiteOpaqueTensorGetDataKnownStep(const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetDataKnownStep(Convert(t));
}
TfLiteRunStep TfLiteOpaqueTensorGetShapeKnownStep(const TfLiteOpaqueTensor* t) {
return TfLiteTensorGetShapeKnownStep(Convert(t));
}
const char* TfLiteOpaqueTensorName(const TfLiteOpaqueTensor* opaque_tensor) {
return TfLiteTensorName(reinterpret_cast<const TfLiteTensor*>(opaque_tensor));
}
TfLiteQuantization TfLiteOpaqueTensorGetQuantization(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->quantization;
}
TfLiteQuantizationParams TfLiteOpaqueTensorGetQuantizationParams(
const TfLiteOpaqueTensor* opaque_tensor) {
return Convert(opaque_tensor)->params;
}
TfLiteStatus TfLiteOpaqueTensorCopyFromBuffer(TfLiteOpaqueTensor* opaque_tensor,
const void* input_data,
size_t input_data_size) {
return TfLiteTensorCopyFromBuffer(
reinterpret_cast<TfLiteTensor*>(opaque_tensor), input_data,
input_data_size);
}
TfLiteStatus TfLiteOpaqueTensorCopyToBuffer(
const TfLiteOpaqueTensor* opaque_tensor, void* output_data,
size_t output_data_size) {
return TfLiteTensorCopyToBuffer(
reinterpret_cast<const TfLiteTensor*>(opaque_tensor), output_data,
output_data_size);
}
int TfLiteOpaqueTensorGetStringCount(const TfLiteOpaqueTensor* tensor) {
return tflite::GetStringCount(Convert(tensor));
}
// Retrieves the string at `index` from a string tensor as a non-owning
// (pointer, length) view into the tensor's packed storage.
TfLiteStatus TfLiteOpaqueTensorGetString(const TfLiteOpaqueTensor* tensor,
                                         int index, const char** str,
                                         int* len) {
  const tflite::StringRef ref = tflite::GetString(Convert(tensor), index);
  *str = ref.str;
  *len = ref.len;
  return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueTensorWriteStrings(TfLiteOpaqueTensor* tensor,
const char* const* str_array,
int str_array_len,
const int* str_n_len) {
tflite::DynamicBuffer buf;
for (int i = 0; i < str_array_len; ++i) {
buf.AddString(str_array[i], str_n_len[i]);
}
buf.WriteToTensorAsVector(Convert(tensor));
return kTfLiteOk;
}
// Writes a single string into a string tensor.
// Delegates to the batch variant and propagates its status, instead of
// discarding it and unconditionally returning kTfLiteOk as before.
TfLiteStatus TfLiteOpaqueTensorWriteString(TfLiteOpaqueTensor* tensor,
                                           const char* str, const int len) {
  return TfLiteOpaqueTensorWriteStrings(tensor, &str, 1, &len);
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderCreate() {
return new TfLiteOpaqueTensorBuilder{};
}
void TfLiteOpaqueTensorBuilderDelete(TfLiteOpaqueTensorBuilder* builder) {
delete builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetType(
TfLiteOpaqueTensorBuilder* builder, TfLiteType type) {
builder->type = type;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetData(
TfLiteOpaqueTensorBuilder* builder, void* data) {
builder->data = data;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetAllocationType(
TfLiteOpaqueTensorBuilder* builder, TfLiteAllocationType allocation_type) {
builder->allocation_type = allocation_type;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantizationParams(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantizationParams params) {
builder->quantization_params = params;
return builder;
}
TfLiteOpaqueTensorBuilder* TfLiteOpaqueTensorBuilderSetQuantization(
TfLiteOpaqueTensorBuilder* builder, TfLiteQuantization quantization) {
builder->quantization = quantization;
return builder;
}
void TfLiteOpaqueTensorSetAllocationTypeToDynamic(TfLiteOpaqueTensor* tensor) {
tflite::SetTensorToDynamic(Convert(tensor));
}
const TfLiteOpaqueTensor* TfLiteOpaqueNodeGetInput(
const TfLiteOpaqueContext* opaque_context,
const TfLiteOpaqueNode* opaque_node, int index) {
const TfLiteTensor* tensor =
tflite::GetInput(reinterpret_cast<const TfLiteContext*>(opaque_context),
reinterpret_cast<const TfLiteNode*>(opaque_node), index);
return reinterpret_cast<const TfLiteOpaqueTensor*>(tensor);
}
TfLiteOpaqueTensor* TfLiteOpaqueNodeGetOutput(
TfLiteOpaqueContext* opaque_context, const TfLiteOpaqueNode* opaque_node,
int index) {
TfLiteTensor* tensor = tflite::GetOutput(
reinterpret_cast<TfLiteContext*>(opaque_context),
reinterpret_cast<const TfLiteNode*>(opaque_node), index);
return reinterpret_cast<TfLiteOpaqueTensor*>(tensor);
}
int TfLiteOpaqueNodeNumberOfInputs(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->inputs->size;
}
int TfLiteOpaqueNodeNumberOfOutputs(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->outputs->size;
}
void* TfLiteOpaqueNodeGetUserData(const TfLiteOpaqueNode* opaque_node) {
return reinterpret_cast<const TfLiteNode*>(opaque_node)->user_data;
}
void* TfLiteOpaqueNodeGetBuiltinData(const TfLiteOpaqueNode* opaque_node) {
return Convert(opaque_node)->builtin_data;
}
// Exposes the node's custom init-data blob (pointer and size) as provided
// when the node was created. Always succeeds.
TfLiteStatus TfLiteOpaqueNodeGetCustomInitialData(
    const TfLiteOpaqueNode* opaque_node, const void** init_data, int* size) {
  const TfLiteNode* node = Convert(opaque_node);
  *init_data = node->custom_initial_data;
  *size = node->custom_initial_data_size;
  return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeInputs(const TfLiteOpaqueNode* opaque_node,
const int** inputs, int* num_inputs) {
const TfLiteNode* node = Convert(opaque_node);
*inputs = node->inputs->data;
*num_inputs = node->inputs->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeOutputs(const TfLiteOpaqueNode* opaque_node,
const int** outputs, int* num_outputs) {
const TfLiteNode* node = Convert(opaque_node);
*outputs = node->outputs->data;
*num_outputs = node->outputs->size;
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueNodeTemporaries(const TfLiteOpaqueNode* opaque_node,
const int** temporaries,
int* num_temporaries) {
const TfLiteNode* node = Convert(opaque_node);
*temporaries = node->temporaries->data;
*num_temporaries = node->temporaries->size;
return kTfLiteOk;
}
// Replaces the node's temporaries array with a copy of `temporaries`.
// A negative count is rejected; a zero count simply clears the array.
TfLiteStatus TfLiteOpaqueNodeSetTemporaries(TfLiteOpaqueNode* opaque_node,
                                            const int* temporaries,
                                            int num_temporaries) {
  if (num_temporaries < 0) return kTfLiteError;
  TfLiteNode* node = Convert(opaque_node);
  // Drop the previous array wholesale before installing the new one.
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(num_temporaries);
  for (int idx = 0; idx < num_temporaries; ++idx) {
    node->temporaries->data[idx] = temporaries[idx];
  }
  return kTfLiteOk;
}
int TfLiteOpaqueNodeGetInputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_input) {
auto* node = Convert(opaque_node);
if (index_of_input < 0 || index_of_input >= node->inputs->size) {
return -1;
}
return node->inputs->data[index_of_input];
}
int TfLiteOpaqueNodeGetOutputTensorIndex(const TfLiteOpaqueNode* opaque_node,
int index_of_output) {
auto* node = Convert(opaque_node);
if (index_of_output < 0 || index_of_output >= node->outputs->size) {
return -1;
}
return node->outputs->data[index_of_output];
}
TfLiteStatus TfLiteOpaqueContextGetExecutionPlan(
TfLiteOpaqueContext* opaque_context, TfLiteIntArray** execution_plan) {
auto context = reinterpret_cast<TfLiteContext*>(opaque_context);
return context->GetExecutionPlan(context, execution_plan);
}
// Retrieves the node at `node_index` together with its TfLiteOperator.
// If the underlying registration already carries an external operator it is
// returned directly; otherwise one is derived from the legacy
// TfLiteRegistration via CommonOpaqueConversionUtil.
TfLiteStatus TfLiteOpaqueContextGetNodeAndRegistration(
    struct TfLiteOpaqueContext* opaque_context, int node_index,
    TfLiteOpaqueNode** node, TfLiteOperator** registration_external) {
  TfLiteContext* context = reinterpret_cast<TfLiteContext*>(opaque_context);
  TfLiteNode* local_node;
  TfLiteRegistration* registration;
  TfLiteStatus status = context->GetNodeAndRegistration(
      context, node_index, &local_node, &registration);
  if (status != kTfLiteOk) return status;
  *node = reinterpret_cast<TfLiteOpaqueNode*>(local_node);
  // Fast path: the registration already has an external counterpart.
  if (registration->registration_external) {
    *registration_external = registration->registration_external;
    return kTfLiteOk;
  }
  // Slow path: synthesize an operator from the legacy registration.
  // NOTE(review): ownership of the derived operator appears to be managed by
  // ObtainOperator — confirm callers must not free it.
  auto derived_registration =
      tflite::internal::CommonOpaqueConversionUtil::ObtainOperator(
          context, registration, node_index);
  if (derived_registration == nullptr) return kTfLiteError;
  *registration_external = derived_registration;
  return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
struct TfLiteOpaqueContext* opaque_context,
TfLiteOperator* registration_external,
const TfLiteIntArray* nodes_to_replace,
TfLiteOpaqueDelegate* opaque_delegate) {
TfLiteContext* context = reinterpret_cast<TfLiteContext*>(opaque_context);
TfLiteDelegate* delegate = reinterpret_cast<TfLiteDelegate*>(opaque_delegate);
TfLiteRegistration registration{};
registration.registration_external = registration_external;
TfLiteStatus status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, registration, nodes_to_replace, delegate);
return status;
}
TfLiteOpaqueTensor* TfLiteOpaqueContextGetOpaqueTensor(
const TfLiteOpaqueContext* opaque_context, int index) {
auto context = reinterpret_cast<const TfLiteContext*>(opaque_context);
return reinterpret_cast<TfLiteOpaqueTensor*>(&context->tensors[index]);
}
TfLiteStatus TfLiteOpaqueContextGetInputs(
const struct TfLiteOpaqueContext* opaque_context, const int** inputs,
int* num_inputs) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_inputs = subgraph->inputs();
*inputs = subgraph_inputs.data();
*num_inputs = subgraph_inputs.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextGetOutputs(
const struct TfLiteOpaqueContext* opaque_context, const int** outputs,
int* num_outputs) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_outputs = subgraph->outputs();
*outputs = subgraph_outputs.data();
*num_outputs = subgraph_outputs.size();
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextGetVariables(
const struct TfLiteOpaqueContext* opaque_context, const int** variables,
int* num_variables) {
auto* subgraph = GetSubgraph(opaque_context);
const std::vector<int>& subgraph_variables = subgraph->variables();
*variables = subgraph_variables.data();
*num_variables = subgraph_variables.size();
return kTfLiteOk;
}
size_t TfLiteOpaqueContextGetNumNodes(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->nodes_size();
}
size_t TfLiteOpaqueContextGetNumTensors(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->tensors_size();
}
const char* TfLiteOpaqueContextGetName(
const struct TfLiteOpaqueContext* opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->GetName().c_str();
}
TfLiteStatus TfLiteOpaqueContextResizeTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensor* tensor,
TfLiteIntArray* new_size) {
TfLiteContext* tflite_context = reinterpret_cast<TfLiteContext*>(context);
return tflite_context->ResizeTensor(
tflite_context, reinterpret_cast<TfLiteTensor*>(tensor), new_size);
}
TfLiteStatus TfLiteOpaqueContextAcquireSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index,
TfLiteOpaqueContext** acquired_opaque_context) {
auto* subgraph = GetSubgraph(opaque_context);
TfLiteContext* acquired_context;
TfLiteStatus status =
subgraph->AcquireSubgraphContext(subgraph_index, &acquired_context);
if (status != kTfLiteOk) {
return status;
}
*acquired_opaque_context = Convert(acquired_context);
return kTfLiteOk;
}
TfLiteStatus TfLiteOpaqueContextReleaseSubgraphContext(
struct TfLiteOpaqueContext* opaque_context, int subgraph_index) {
return GetSubgraph(opaque_context)->ReleaseSubgraphContext(subgraph_index);
}
TfLiteStatus TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
TfLiteOpaqueContext* opaque_context, int subgraph_index) {
auto* subgraph = GetSubgraph(opaque_context);
return subgraph->MarkSubgraphAsDelegationSkippable(subgraph_index);
}
TfLiteStatus TfLiteOpaqueContextGetNodeInitDataMmapInfo(
const TfLiteOpaqueContext* context, const TfLiteOpaqueNode* node, int* fd,
int64_t* custom_initial_data_offset_in_file,
int64_t* custom_initial_data_size) {
auto* subgraph = GetSubgraph(context);
return subgraph->GetNodeInitDataMmapInfo(Convert(node), fd,
custom_initial_data_offset_in_file,
custom_initial_data_size);
}
TfLiteStatus TfLiteOpaqueContextAddTensor(TfLiteOpaqueContext* context,
TfLiteOpaqueTensorBuilder* builder,
int* new_tensor_index) {
if (builder->allocation_type != kTfLiteDynamic &&
builder->allocation_type != kTfLiteArenaRw &&
builder->allocation_type != kTfLiteArenaRwPersistent) {
TfLiteOpaqueContextReportError(
context,
"Invalid allocation type '%d'. Allocation type for "
"TfLiteOpaqueContextAddTensor must be one of: "
"'kTfLiteDynamic', 'kTfLiteArenaRw' or 'kTfLiteArenaRwPersistent'.",
builder->allocation_type);
return kTfLiteError;
}
if (builder->allocation_type == kTfLiteDynamic && builder->data == nullptr) {
TfLiteOpaqueContextReportError(context,
"For tensors of allocation type "
"'kTfLiteDynamic' 'data' must be provided.");
return kTfLiteError;
}
if ((builder->allocation_type == kTfLiteArenaRw ||
builder->allocation_type == kTfLiteArenaRwPersistent) &&
builder->data != nullptr) {
TfLiteOpaqueContextReportError(
context,
"For tensors of allocation type "
"'kTfLiteArenaRw' or 'kTfLiteArenaRwPersistent' "
"'data' must not be provided.");
return kTfLiteError;
}
auto* tflite_context = Convert(context);
int index = -1;
auto status = tflite_context->AddTensors(tflite_context, 1, &index);
if (status != kTfLiteOk) return status;
tflite_context->tensors[index].type = builder->type;
tflite_context->tensors[index].data.data = builder->data;
tflite_context->tensors[index].allocation_type = builder->allocation_type;
tflite_context->tensors[index].params = builder->quantization_params;
tflite_context->tensors[index].quantization = builder->quantization;
if (new_tensor_index != nullptr) {
*new_tensor_index = index;
}
return status;
}
TfLiteStatus TfLiteOpaqueContextGetSizeOfType(TfLiteOpaqueContext* context,
const TfLiteType type,
size_t* bytes) {
return tflite::GetSizeOfType(Convert(context), type, bytes);
}
void TfLiteOpaqueContextReportError(struct TfLiteOpaqueContext* opaque_context,
const char* format, ...) {
va_list vlist;
va_start(vlist, format);
TfLiteOpaqueContextReportErrorVa(opaque_context, format, vlist);
va_end(vlist);
}
void TfLiteOpaqueContextReportErrorVa(
struct TfLiteOpaqueContext* opaque_context, const char* format,
va_list vlist) {
va_list copy;
va_copy(copy, vlist);
int n = vsnprintf(nullptr, 0, format, copy);
if (n < 0) {
return;
}
size_t size = (size_t)n + 1;
char* buffer = new char[size];
n = vsnprintf(buffer, size, format, vlist);
if (n < 0) {
delete[] buffer;
return;
}
auto* context = reinterpret_cast<TfLiteContext*>(opaque_context);
TF_LITE_KERNEL_LOG(context, "%s", buffer);
delete[] buffer;
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate(
const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) {
if (!opaque_delegate_builder) return nullptr;
TfLiteDelegate* result = new TfLiteDelegate{};
result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{};
*(result->opaque_delegate_builder) = *opaque_delegate_builder;
return reinterpret_cast<TfLiteOpaqueDelegate*>(result);
}
void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* opaque_delegate) {
if (!opaque_delegate) return;
const TfLiteDelegate* tflite_delegate =
reinterpret_cast<const TfLiteDelegate*>(opaque_delegate);
delete tflite_delegate->opaque_delegate_builder;
delete tflite_delegate;
}
#endif
void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate) {
if (!delegate) return nullptr;
const auto* tflite_delegate =
reinterpret_cast<const TfLiteDelegate*>(delegate);
if (!tflite_delegate->opaque_delegate_builder) return tflite_delegate->data_;
return tflite_delegate->opaque_delegate_builder->data;
} | #include "tensorflow/lite/core/c/c_api_opaque.h"
#include <stddef.h>
#include <cstring>
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api.h"
namespace tflite {
namespace {
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMemNoneBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithMmapRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithArenaRwPersistentBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithDynamicBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithPersistentRoBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithCustomBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetAllocationStrategy,
WithVariantObjectBehavesAsTfLiteTensorGetAllocationStrategy) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetAllocationStrategy(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetAllocationStrategy(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMemNoneBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithMmapRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithDynamicBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithPersistentRoBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithCustomBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetBufferAddressStability,
WithVariantObjectBehavesAsTfLiteTensorGetBufferAddressStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetBufferAddressStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetBufferAddressStability(&t));
}
TEST(TestTfLiteOpaqueTensorData, ValidInput) {
TfLiteTensor t;
char data[] = "data";
t.data.raw = data;
EXPECT_EQ(TfLiteOpaqueTensorData(reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
data);
}
TEST(TestTfLiteOpaqueTensorData, NullInput) {
EXPECT_EQ(TfLiteOpaqueTensorData(nullptr), nullptr);
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMemNoneBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithMmapRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithDynamicBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithPersistentRoBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithCustomBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataStability,
WithVariantObjectBehavesAsTfLiteTensorGetDataStability) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataStability(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataStability(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithDynamicBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithCustomBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetDataKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetDataKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetDataKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetDataKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMemNoneBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithMmapRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithArenaRwPersistentBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithDynamicBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithPersistentRoBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithCustomBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueTensorGetShapeKnownStep,
WithVariantObjectBehavesAsTfLiteTensorGetShapeKnownStep) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteOpaqueTensorGetShapeKnownStep(
reinterpret_cast<TfLiteOpaqueTensor*>(&t)),
TfLiteTensorGetShapeKnownStep(&t));
}
TEST(TestTfLiteOpaqueDelegate, CreateAndDelete) {
std::unique_ptr<TfLiteOpaqueDelegateBuilder> opaque_delegate_builder(
new TfLiteOpaqueDelegateBuilder{});
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateCreate(opaque_delegate_builder.get());
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate, Create_WithNull) {
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateCreate(nullptr));
}
TEST(TestTfLiteOpaqueDelegate, Delete_WithNull) {
TfLiteOpaqueDelegateDelete(nullptr);
}
TEST(TestTfLiteOpaqueDelegate, GetData_WellFormedOpaqueDelegate) {
int delegate_data = 42;
TfLiteOpaqueDelegateBuilder builder{};
builder.data = &delegate_data;
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
TEST(TestTfLiteOpaqueDelegate,
GetData_NotConstructedWithTfLiteOpaqueDelegateCreate) {
int delegate_data = 42;
TfLiteDelegate non_opaque_delegate = TfLiteDelegateCreate();
non_opaque_delegate.data_ = &delegate_data;
auto* opaque_delegate =
reinterpret_cast<TfLiteOpaqueDelegate*>(&non_opaque_delegate);
EXPECT_EQ(&delegate_data, TfLiteOpaqueDelegateGetData(opaque_delegate));
}
TEST(TestTfLiteOpaqueDelegate, GetData_NoDataSetViaOpaqueDelegateBuilder) {
TfLiteOpaqueDelegateBuilder builder{};
TfLiteOpaqueDelegate* opaque_delegate = TfLiteOpaqueDelegateCreate(&builder);
EXPECT_EQ(nullptr, TfLiteOpaqueDelegateGetData(opaque_delegate));
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
namespace my_custom_op {
struct MyOpData {
int temp_tensor_index;
};
void* Init(TfLiteOpaqueContext* context, const char* buffer, size_t length) {
auto* op_data = new MyOpData{};
return op_data;
}
void Free(TfLiteOpaqueContext* context, void* buffer) {
delete reinterpret_cast<MyOpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int num_temporaries = 1;
int temporary_tensor_indices[num_temporaries];
TfLiteStatus status =
TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
-1);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteError);
status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
0);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
TfLiteOpaqueTensorBuilder* builder = TfLiteOpaqueTensorBuilderCreate();
TfLiteOpaqueTensorBuilderSetType(builder, kTfLiteFloat32);
TfLiteOpaqueTensorBuilderSetAllocationType(builder, kTfLiteArenaRw);
TfLiteOpaqueContextAddTensor(context, builder, &temporary_tensor_indices[0]);
TfLiteOpaqueTensorBuilderDelete(builder);
status = TfLiteOpaqueNodeSetTemporaries(node, temporary_tensor_indices,
num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, status == kTfLiteOk);
op_data->temp_tensor_index = temporary_tensor_indices[0];
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TfLiteIntArray* temp_size = TfLiteIntArrayCreate(1);
temp_size->data[0] = 1;
return TfLiteOpaqueContextResizeTensor(context, temp_tensor, temp_size);
}
TfLiteStatus Invoke(TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
auto* op_data =
reinterpret_cast<MyOpData*>(TfLiteOpaqueNodeGetUserData(node));
const int* temporary_tensor_indices;
int num_temporaries;
TfLiteOpaqueNodeTemporaries(node, &temporary_tensor_indices,
&num_temporaries);
TF_LITE_OPAQUE_ENSURE(context, num_temporaries == 1);
TF_LITE_OPAQUE_ENSURE(
context, temporary_tensor_indices[0] == op_data->temp_tensor_index);
TfLiteOpaqueTensor* temp_tensor =
TfLiteOpaqueContextGetOpaqueTensor(context, op_data->temp_tensor_index);
TF_LITE_OPAQUE_ENSURE(context,
TfLiteOpaqueTensorType(temp_tensor) == kTfLiteFloat32);
TF_LITE_OPAQUE_ENSURE(context, TfLiteOpaqueTensorGetAllocationType(
temp_tensor) == kTfLiteArenaRw);
size_t temp_bytes = TfLiteOpaqueTensorByteSize(temp_tensor);
void* temp_data = TfLiteOpaqueTensorData(temp_tensor);
TF_LITE_OPAQUE_ENSURE(context, temp_bytes != 0);
TF_LITE_OPAQUE_ENSURE(context, temp_data != nullptr);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfInputs(node));
const TfLiteOpaqueTensor* input = TfLiteOpaqueNodeGetInput(context, node, 0);
size_t input_bytes = TfLiteOpaqueTensorByteSize(input);
void* input_data = TfLiteOpaqueTensorData(input);
EXPECT_EQ(input_bytes, temp_bytes);
std::memcpy(temp_data, input_data, input_bytes);
EXPECT_EQ(1, TfLiteOpaqueNodeNumberOfOutputs(node));
TfLiteOpaqueTensor* output = TfLiteOpaqueNodeGetOutput(context, node, 0);
size_t output_bytes = TfLiteOpaqueTensorByteSize(output);
void* output_data = TfLiteOpaqueTensorData(output);
EXPECT_EQ(output_bytes, temp_bytes);
std::memcpy(output_data, temp_data, output_bytes);
return kTfLiteOk;
}
}
TEST(TestTfLiteOpaqueNode, CustomOpWithSetAndGetTemporaries) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepare(reg, my_custom_op::Prepare);
TfLiteOperatorSetInit(reg, my_custom_op::Init);
TfLiteOperatorSetFree(reg, my_custom_op::Free);
TfLiteOperatorSetInvoke(reg, my_custom_op::Invoke);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithLegacyCallbacks) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepare(reg, [](auto context, auto node) {
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInit(reg, [](auto context, auto buffer, auto length) {
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFree(
reg, [](auto context, auto data) { my_custom_op::Free(context, data); });
TfLiteOperatorSetInvoke(reg, [](auto context, auto node) {
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithNoUserData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
nullptr);
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(reg,
[](auto user_data, auto context, auto data) {
EXPECT_EQ(nullptr, user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(reg,
[](auto user_data, auto context, auto node) {
EXPECT_EQ(nullptr, user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
TEST(TestTfLiteOpaqueNode, CustomOpWithData) {
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/custom_sinh.bin");
ASSERT_NE(model, nullptr);
TfLiteOperator* reg =
TfLiteOperatorCreateWithData(kTfLiteBuiltinCustom, "Sinh", 1,
reinterpret_cast<void*>(345));
TfLiteOperatorSetPrepareWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Prepare(context, node);
});
TfLiteOperatorSetInitWithData(
reg, [](auto user_data, auto context, auto buffer, auto length) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Init(context, buffer, length);
});
TfLiteOperatorSetFreeWithData(
reg, [](auto user_data, auto context, auto data) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
my_custom_op::Free(context, data);
});
TfLiteOperatorSetInvokeWithData(
reg, [](auto user_data, auto context, auto node) {
EXPECT_EQ(reinterpret_cast<void*>(345), user_data);
return my_custom_op::Invoke(context, node);
});
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsAddOperator(options, reg);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
const float input_value = 42.0f;
TfLiteTensorCopyFromBuffer(input_tensor, &input_value, sizeof(float));
EXPECT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
float output_value;
TfLiteTensorCopyToBuffer(output_tensor, &output_value, sizeof(float));
EXPECT_EQ(output_value, input_value);
TfLiteInterpreterDelete(interpreter);
TfLiteOperatorDelete(reg);
TfLiteModelDelete(model);
}
}
} | TfLiteStatus TfLiteOpaqueTensorGetDimSignature(
const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index,
int32_t* dim_length) {
const TfLiteTensor* tensor = Convert(opaque_tensor);
if (tensor->dims_signature != nullptr && tensor->dims_signature->size != 0) {
*dim_length = tensor->dims_signature->data[dim_index];
} else {
*dim_length = tensor->dims->data[dim_index];
}
return kTfLiteOk;
} | |
#include "xla/service/cpu/runtime/thunk_executor.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
ThunkExecutor::ThunkExecutor(ThunkSequence thunk_sequence,
                             std::vector<NodeDef> nodes_defs)
    : thunk_sequence_(std::move(thunk_sequence)),
      nodes_defs_(std::move(nodes_defs)),
      is_sequential_(true) {
  // Identify source nodes (no incoming edges) and sink nodes (no outgoing
  // edges) of the dependency graph. A node may be both.
  for (NodeId id = 0; id < nodes_defs_.size(); ++id) {
    const NodeDef& def = nodes_defs_[id];
    if (def.in_edges.empty()) {
      source_.push_back(id);
    }
    if (def.out_edges.empty()) {
      sink_.push_back(id);
    }
  }

  // Drop edges that are implied by transitivity.
  int64_t num_erased_edges = TransitiveReduction();

  // The executor is sequential only if every node depends on its immediate
  // predecessor, i.e. the graph degenerates to a single chain.
  for (NodeId id = 1; id < nodes_defs_.size(); ++id) {
    if (!absl::c_linear_search(nodes_defs_[id].in_edges, id - 1)) {
      is_sequential_ = false;
      break;
    }
  }

  VLOG(2) << absl::StreamFormat(
      "Constructed ThunkExecutor with %d nodes: #source_nodes=%d "
      "#sink_nodes=%d, #erased_edges=%d, is_sequential=%v",
      nodes_defs_.size(), source_.size(), sink_.size(), num_erased_edges,
      is_sequential_);

  // A non-empty executor must have at least one source and one sink node.
  DCHECK((!source_.empty() && !sink_.empty() && !thunk_sequence_.empty()) ||
         (source_.empty() && sink_.empty() && thunk_sequence_.empty()));
}
// Builds a ThunkExecutor for `thunk_sequence` by constructing the thunk
// dependency graph: thunk `j` (j < i) gets an edge to thunk `i` whenever
// their buffer read/write sets conflict, so conflicting thunks execute in
// their original sequence order.
absl::StatusOr<ThunkExecutor> ThunkExecutor::Create(
    ThunkSequence thunk_sequence) {
  std::vector<NodeDef> defs(thunk_sequence.size());

  // Read/write set of each thunk's buffer uses, used to detect conflicts
  // between pairs of thunks.
  std::vector<BufferUse::ReadWriteSet> rwsets(thunk_sequence.size());

  for (NodeId i = 0; i < thunk_sequence.size(); ++i) {
    defs[i].id = i;

    Thunk& thunk = *thunk_sequence[i];
    rwsets[i].AddAll(thunk.buffer_uses());

    // Add an edge from every earlier thunk whose buffer uses conflict with
    // this thunk's. Iterate `j` from `i` down to 1 (predecessor is `j - 1`)
    // so the descending loop terminates even if NodeId is an unsigned type;
    // predecessors are still visited in the original (descending) order.
    for (NodeId j = i; j > 0; --j) {
      NodeId pred = j - 1;
      if (rwsets[pred].HasConflicts(rwsets[i])) {
        defs[pred].out_edges.push_back(i);
        defs[i].in_edges.push_back(pred);
      }
    }
  }

  return ThunkExecutor(std::move(thunk_sequence), std::move(defs));
}
// Per-execution scheduling state: one atomic dependency counter and one Node
// per graph node, plus the event that becomes available once all sink nodes
// have completed.
ThunkExecutor::ExecuteState::ExecuteState(ThunkExecutor* executor,
                                          TaskRunner runner)
    : executor(executor),
      runner(std::move(runner)),
      counters(executor->nodes_defs().size()),
      nodes(executor->nodes_defs().size()),
      abort(false),
      pending_sink_nodes(executor->sink().size()),
      execute_event(tsl::MakeConstructedAsyncValueRef<ExecuteEvent>()) {
  // Each counter starts at the node's number of unsatisfied dependencies
  // (in-edges); a node becomes ready once its counter reaches zero. The
  // release store pairs with the acquire load performed when a node is
  // taken off the ready queue in Execute().
  for (NodeId id = 0; id < nodes.size(); ++id) {
    const NodeDef& node_def = executor->node_def(id);
    counters[id].store(node_def.in_edges.size(), std::memory_order_release);
    nodes[id] = Node{id, &counters[id], &node_def.out_edges};
  }
}
// Executes the thunk sequence, picking the cheapest available strategy:
//   - empty sequence: return an already-available OK event;
//   - single thunk: execute it directly, no scheduling state needed;
//   - sequential graph: run thunks one after another (ExecuteSequential);
//   - otherwise: concurrent execution driven by per-node dependency counters.
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent> ThunkExecutor::Execute(
    const Thunk::ExecuteParams& params, TaskRunner runner) {
  // Fast path for an empty sequence.
  if (ABSL_PREDICT_FALSE(thunk_sequence_.empty())) {
    return Thunk::OkExecuteEvent();
  }
  // Fast path for a single thunk: no dependency tracking required.
  if (ABSL_PREDICT_FALSE(thunk_sequence_.size() == 1)) {
    return thunk_sequence_[0]->Execute(params);
  }
  // The graph is a single chain; skip the concurrent machinery entirely.
  if (is_sequential_) {
    return ExecuteSequential(params);
  }
  auto state = std::make_unique<ExecuteState>(this, std::move(runner));
  // Seed execution with all source nodes (nodes with no dependencies).
  Execute(state.get(), params, ReadyQueue(source_.begin(), source_.end()));
  // Keep `state` alive until completion by moving it into the callback
  // attached to the execute event; copy the event ref first since `state`
  // is consumed by the capture.
  auto execute_event = state->execute_event;
  execute_event.AndThen([state = std::move(state)] {
    CHECK_EQ(state->pending_sink_nodes.load(std::memory_order_acquire), 0)
        << "All sink nodes must be completed before execute_event is marked "
           "available.";
  });
  return execute_event;
}
// Runs thunks strictly in sequence order. If some thunk completes
// asynchronously, switches to asynchronous resumption: returns a fresh
// event and continues the remaining thunks in ResumeExecuteSequential
// once the pending event becomes available.
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent>
ThunkExecutor::ExecuteSequential(const Thunk::ExecuteParams& params) {
  for (int64_t i = 0; i < thunk_sequence_.size(); ++i) {
    Thunk& thunk = *thunk_sequence_[i];
    auto execute_event = thunk.Execute(params);
    if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
      // Thunk did not complete synchronously: resume from `i + 1` in the
      // continuation. NOTE(review): `params` is captured by reference —
      // assumes the caller keeps it alive until the returned event is
      // available.
      auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
      execute_event.AndThen([this, &params, i, event](absl::Status status) {
        if (ABSL_PREDICT_FALSE(!status.ok())) {
          event.SetError(std::move(status));
        } else {
          ResumeExecuteSequential(i + 1, params, std::move(event));
        }
      });
      return event;
    }
    // Propagate the first error and stop executing subsequent thunks.
    if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
      return execute_event;
    }
  }
  // Every thunk completed synchronously and successfully.
  return Thunk::OkExecuteEvent();
}
// Continues a sequential execution from `index` after an asynchronous thunk
// completed. Mirrors ExecuteSequential, but reports completion/errors via the
// caller-provided `event` instead of returning a new one.
void ThunkExecutor::ResumeExecuteSequential(
    int64_t index, const Thunk::ExecuteParams& params,
    tsl::AsyncValueRef<ExecuteEvent> event) {
  for (int64_t i = index; i < thunk_sequence_.size(); ++i) {
    Thunk& thunk = *thunk_sequence_[i];
    auto execute_event = thunk.Execute(params);
    if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
      // Suspend again: resume from this thunk's completion handler.
      // NOTE(review): `params` is captured by reference and must outlive the
      // whole sequential execution.
      execute_event.AndThen(
          [this, &params, i, event = std::move(event)](absl::Status status) {
            if (ABSL_PREDICT_FALSE(!status.ok())) {
              event.SetError(std::move(status));
            } else {
              ResumeExecuteSequential(i + 1, params, std::move(event));
            }
          });
      return;
    }
    if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
      event.SetError(execute_event.GetError());
      return;
    }
  }
  // All remaining thunks completed synchronously and successfully.
  event.SetStateConcrete();
}
// Runs every node in `ready_queue`; successors whose last dependency
// completes are appended via ProcessOutEdges. When a task runner is
// available, the unprocessed tail of the queue is offloaded so independent
// nodes can execute concurrently.
void ThunkExecutor::Execute(ExecuteState* state,
                            const Thunk::ExecuteParams& params,
                            ReadyQueue ready_queue) {
  tsl::profiler::TraceMe trace("ThunkExecutor::Execute");
  if (ready_queue.empty()) return;
  bool has_runner = state->runner != nullptr;
  for (int64_t i = 0; i < ready_queue.size(); ++i) {
    NodeId id = ready_queue[i];
    Node& node = state->nodes[id];
    // A node only enters the ready queue once all in-edges are satisfied.
    int64_t cnt = node.counter->load(std::memory_order_acquire);
    CHECK_EQ(cnt, 0) << "Node counter must be 0";
    // Hand the rest of the queue to the task runner and keep only the
    // current node for this thread.
    if (has_runner && i < ready_queue.size() - 1) {
      ReadyQueue tail(ready_queue.begin() + i + 1, ready_queue.end());
      ready_queue.erase(ready_queue.begin() + i + 1, ready_queue.end());
      state->runner([&params, state, tail = std::move(tail)]() mutable {
        state->executor->Execute(state, params, std::move(tail));
      });
    }
    // After an abort we stop executing real thunks but still walk the graph
    // (with ok events) so dependency counters and sink accounting stay
    // consistent.
    Thunk& thunk = *state->executor->thunk_sequence_[id];
    auto execute_event = state->abort.load(std::memory_order_relaxed)
                             ? Thunk::OkExecuteEvent()
                             : thunk.Execute(params);
    if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
      // Asynchronous thunk: process its out edges from the completion
      // handler. NOTE(review): captures `params` by reference; relies on the
      // caller keeping it alive until the whole execution finishes.
      execute_event.AndThen([&, state, execute_event = execute_event.AsPtr()] {
        ReadyQueue ready_queue;
        ProcessOutEdges(state, execute_event, node, ready_queue);
        Execute(state, params, std::move(ready_queue));
      });
    } else {
      ProcessOutEdges(state, execute_event.AsPtr(), node, ready_queue);
    }
  }
}
// Called when `node` finishes executing: records errors, decrements successor
// dependency counters (collecting newly ready nodes into `ready_queue`), and
// completes the run's execute_event when the last sink node finishes.
void ThunkExecutor::ProcessOutEdges(
    ExecuteState* state, tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
    Node& node, ReadyQueue& ready_queue) {
  // Latch the abort flag and keep the first error; remaining thunks will be
  // skipped, but the graph is still traversed for correct accounting.
  if (ABSL_PREDICT_FALSE(node_event.IsError())) {
    absl::MutexLock lock(&state->abort_mutex);
    state->abort = true;
    state->abort_status.Update(node_event.GetError());
  }
  // A node with no out edges is a sink.
  bool is_sink = node.out_edges->empty();
  for (NodeId out_edge : *node.out_edges) {
    Node& out_node = state->nodes[out_edge];
    // fetch_sub returns the previous value: 1 means we released the
    // successor's last dependency and it is now ready to run.
    int64_t cnt = out_node.counter->fetch_sub(1, std::memory_order_release);
    CHECK_GE(cnt, 1) << "Node counter can't drop below 0";
    if (cnt == 1) ready_queue.push_back(out_edge);
  }
  if (ABSL_PREDICT_FALSE(is_sink)) {
    // The run is complete once the last pending sink node retires.
    bool is_done =
        state->pending_sink_nodes.fetch_sub(1, std::memory_order_acq_rel) == 1;
    if (ABSL_PREDICT_TRUE(!is_done)) return;
    // Propagate either the recorded abort error or success to the
    // caller-visible event.
    if (ABSL_PREDICT_FALSE(state->abort.load(std::memory_order_relaxed))) {
      auto take_error = [&] {
        absl::MutexLock lock(&state->abort_mutex);
        CHECK(!state->abort_status.ok())
            << "Abort status must be set if execution is aborted";
        return std::move(state->abort_status);
      };
      state->execute_event.SetError(take_error());
    } else {
      state->execute_event.SetStateConcrete();
    }
  }
}
int64_t ThunkExecutor::TransitiveReduction() {
int64_t num_erased_edges = 0;
auto erase_edge = [&](NodeDef& from, NodeDef& to) {
auto out_edge_it = absl::c_find(from.out_edges, to.id);
auto in_edge_it = absl::c_find(to.in_edges, from.id);
bool has_out_edge = out_edge_it != from.out_edges.end();
bool has_in_edge = in_edge_it != to.in_edges.end();
DCHECK_EQ(has_out_edge, has_in_edge) << "Edges must be symmetric";
if (has_out_edge && has_in_edge) {
from.out_edges.erase(out_edge_it);
to.in_edges.erase(in_edge_it);
++num_erased_edges;
}
};
std::vector<int64_t> stack;
std::vector<bool> visited;
auto add_to_stack = [&](int64_t node_id) {
if (!visited[node_id]) {
stack.push_back(node_id);
visited[node_id] = true;
}
};
for (int64_t i = 0; i < nodes_defs_.size(); ++i) {
NodeDef& source_node = nodes_defs_[i];
stack.clear();
visited.assign(nodes_defs_.size(), false);
for (int64_t out_id : source_node.out_edges) {
NodeDef& out_node = nodes_defs_[out_id];
for (int64_t start_id : out_node.out_edges) add_to_stack(start_id);
}
while (!stack.empty()) {
int64_t node_id = stack.back();
stack.pop_back();
NodeDef& node = nodes_defs_[node_id];
erase_edge(source_node, node);
for (int64_t out_id : node.out_edges) add_to_stack(out_id);
}
}
return num_erased_edges;
}
std::string ThunkExecutor::ToString() const {
std::string str = absl::StrFormat(
"ThunkExecutor: #thunks=%d #source_nodes=%d #sink_nodes=%d",
thunk_sequence_.size(), source_.size(), sink_.size());
std::vector<std::vector<std::string>> in_edges(thunk_sequence_.size());
for (const auto& node_def : nodes_defs_) {
for (NodeId in_edge : node_def.in_edges) {
in_edges[node_def.id].push_back(thunk_sequence_[in_edge]->info().op_name);
}
}
for (NodeId i = 0; i < thunk_sequence_.size(); ++i) {
const Thunk& thunk = *thunk_sequence_[i];
bool is_source = absl::c_find(source_, i) != source_.end();
bool is_sink = absl::c_find(sink_, i) != sink_.end();
absl::StrAppendFormat(
&str,
"\n thunk #%05d: op_name=%s, dependencies=[%s], source=%v, sink=%v", i,
thunk.info().op_name, absl::StrJoin(in_edges[i], ", "), is_source,
is_sink);
}
return str;
}
} | #include "xla/service/cpu/runtime/thunk_executor.h"
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/task.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
using ::testing::ElementsAre;
// Test-only thunk that element-wise adds int32 source slices into destination
// slices. Optional knobs: record an execution trace, fail with an injected
// error, or declare a write to a shared fake buffer ("side effect") which
// forces conflicting thunks to serialize.
class AddI32Thunk final : public Thunk {
 public:
  AddI32Thunk(std::string name, std::vector<BufferAllocation::Slice> srcs,
              std::vector<BufferAllocation::Slice> dsts,
              std::vector<std::string>* trace, bool inject_error,
              bool inject_side_effect);
  // Factory with defaults for the optional testing knobs.
  static std::unique_ptr<Thunk> Create(
      std::string name, std::vector<BufferAllocation::Slice> srcs,
      std::vector<BufferAllocation::Slice> dsts,
      std::vector<std::string>* trace = nullptr, bool inject_error = false,
      bool inject_side_effect = false);
  // Wraps each int32 vector as non-owning device memory.
  static std::vector<MaybeOwningDeviceMemory> AsDeviceMemory(
      absl::Span<std::vector<int32_t>* const> data);
  // dst += src over the overlapping int32 prefix of the two slices.
  static absl::Status Execute(const BufferAllocations* allocations,
                              BufferAllocation::Slice src_slice,
                              BufferAllocation::Slice dst_slice);
  tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams&) final;
  BufferUses buffer_uses() const final;
 private:
  std::vector<BufferAllocation::Slice> srcs_;  // read
  std::vector<BufferAllocation::Slice> dsts_;  // written (accumulated into)
  std::vector<std::string>* trace_;            // optional execution log
  bool inject_error_;
  bool inject_side_effect_;
};
// Builds an AddI32Thunk on the heap; vectors are moved to avoid copies.
std::unique_ptr<Thunk> AddI32Thunk::Create(
    std::string name, std::vector<BufferAllocation::Slice> srcs,
    std::vector<BufferAllocation::Slice> dsts, std::vector<std::string>* trace,
    bool inject_error, bool inject_side_effect) {
  auto thunk = std::make_unique<AddI32Thunk>(std::move(name), std::move(srcs),
                                             std::move(dsts), trace,
                                             inject_error, inject_side_effect);
  return thunk;
}
// Wraps each int32 vector's storage as a non-owning DeviceMemoryBase; the
// vectors must outlive the returned buffers.
std::vector<MaybeOwningDeviceMemory> AddI32Thunk::AsDeviceMemory(
    absl::Span<std::vector<int32_t>* const> data) {
  std::vector<MaybeOwningDeviceMemory> result;
  result.reserve(data.size());
  for (std::vector<int32_t>* vec : data) {
    se::DeviceMemoryBase base(vec->data(), vec->size() * sizeof(int32_t));
    result.emplace_back(base);
  }
  return result;
}
// All AddI32Thunks are registered as kKernel thunks; `name` becomes the
// op_name used in traces and ToString() output.
AddI32Thunk::AddI32Thunk(std::string name,
                         std::vector<BufferAllocation::Slice> srcs,
                         std::vector<BufferAllocation::Slice> dsts,
                         std::vector<std::string>* trace, bool inject_error,
                         bool inject_side_effect)
    : Thunk(Kind::kKernel, Info{name}),
      srcs_(std::move(srcs)),
      dsts_(std::move(dsts)),
      trace_(trace),
      inject_error_(inject_error),
      inject_side_effect_(inject_side_effect) {}
// Resolves both slices to host pointers and accumulates src into dst over
// the overlapping int32 prefix (slices may have different lengths).
absl::Status AddI32Thunk::Execute(const BufferAllocations* allocations,
                                  BufferAllocation::Slice src_slice,
                                  BufferAllocation::Slice dst_slice) {
  TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase src,
                      allocations->GetDeviceAddress(src_slice));
  TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase dst,
                      allocations->GetDeviceAddress(dst_slice));
  // Slices must be whole int32 elements.
  CHECK_EQ(src.size() % sizeof(int32_t), 0);
  CHECK_EQ(dst.size() % sizeof(int32_t), 0);
  int32_t* src_ptr = static_cast<int32_t*>(src.opaque());
  int32_t* dst_ptr = static_cast<int32_t*>(dst.opaque());
  size_t len = std::min(src.size(), dst.size()) / sizeof(int32_t);
  for (int j = 0; j < len; ++j) dst_ptr[j] += src_ptr[j];
  return absl::OkStatus();
}
// Executes all src/dst pairs. Runs asynchronously on the intra-op thread
// pool when one is provided (exercising the executor's async path),
// otherwise completes synchronously. An injected error replaces execution.
tsl::AsyncValueRef<Thunk::ExecuteEvent> AddI32Thunk::Execute(
    const ExecuteParams& params) {
  if (trace_) trace_->push_back(info().op_name);
  auto execute = [&]() -> absl::Status {
    CHECK_EQ(srcs_.size(), dsts_.size());
    for (int i = 0; i < srcs_.size(); ++i) {
      TF_RETURN_IF_ERROR(
          Execute(params.buffer_allocations, srcs_.at(i), dsts_.at(i)));
    }
    return absl::OkStatus();
  };
  if (params.intra_op_threadpool) {
    // Async path: schedule the work and return a not-yet-available event.
    // NOTE(review): the scheduled lambda captures `this` and `params` by
    // reference; the test must keep them alive until the event resolves.
    auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
    params.intra_op_threadpool->getPool()->Schedule([&, event, execute] {
      if (inject_error_) {
        event.SetError(absl::InternalError("Injected error"));
      } else {
        CHECK_OK(execute());
        event.SetStateConcrete();
      }
    });
    return event;
  }
  if (inject_error_) {
    return tsl::MakeErrorAsyncValueRef(absl::InternalError("Injected error"));
  }
  TF_RETURN_IF_ERROR(execute());
  return Thunk::OkExecuteEvent();
}
// Declares reads of all sources and writes of all destinations. When side
// effects are requested, adds a write to a process-wide fake slice shared by
// all such thunks, which makes them conflict and serializes their execution.
AddI32Thunk::BufferUses AddI32Thunk::buffer_uses() const {
  BufferUses uses;
  for (const auto& src : srcs_) uses.push_back(BufferUse::Read(src));
  for (const auto& dst : dsts_) uses.push_back(BufferUse::Write(dst));
  if (inject_side_effect_) {
    static auto* fake_alloc = new BufferAllocation(0, 1, 0);
    BufferAllocation::Slice fake_slice(fake_alloc, 0, 1);
    uses.push_back(BufferUse::Write(fake_slice));
  }
  return uses;
}
// "a" and "b" touch disjoint slices while "c" overlaps both, so the DAG must
// be non-sequential with {a, b} as sources and c as the only sink.
TEST(ThunkExecutorTest, DependencyOrdering) {
  BufferAllocation alloc(0, 80, 0);
  BufferAllocation::Slice slice0(&alloc, 0, 40);
  BufferAllocation::Slice slice1(&alloc, 40, 40);
  BufferAllocation::Slice slice2(&alloc, 20, 40);  // overlaps slice0 and slice1
  ThunkSequence sequence;
  sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}));
  sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}));
  sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}));
  TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
                          ThunkExecutor::Create(std::move(sequence)));
  EXPECT_FALSE(executor.is_sequential());
  EXPECT_THAT(executor.source(), ElementsAre(0, 1));
  EXPECT_THAT(executor.sink(), ElementsAre(2));
}
// All three thunks write the same slice, so every pair conflicts and the
// executor must detect a purely sequential chain.
TEST(ThunkExecutorTest, SequentialOrdering) {
  BufferAllocation alloc(0, 80, 0);
  BufferAllocation::Slice slice(&alloc, 0, 40);
  ThunkSequence sequence;
  sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
  sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
  sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
  TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
                          ThunkExecutor::Create(std::move(sequence)));
  EXPECT_TRUE(executor.is_sequential());
  EXPECT_THAT(executor.source(), ElementsAre(0));
  EXPECT_THAT(executor.sink(), ElementsAre(2));
}
// With a->b, b->c and the implied a->c, transitive reduction must drop the
// redundant a->c edge, leaving only the chain edges.
TEST(ThunkExecutorTest, TransitiveReduction) {
  BufferAllocation alloc(0, 80, 0);
  BufferAllocation::Slice slice(&alloc, 0, 40);
  ThunkSequence sequence;
  sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
  sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
  sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
  TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
                          ThunkExecutor::Create(std::move(sequence)));
  EXPECT_THAT(executor.source(), ElementsAre(0));
  EXPECT_THAT(executor.sink(), ElementsAre(2));
  EXPECT_THAT(executor.node_def(0).out_edges, ElementsAre(1));
  EXPECT_THAT(executor.node_def(1).in_edges, ElementsAre(0));
  EXPECT_THAT(executor.node_def(1).out_edges, ElementsAre(2));
  EXPECT_THAT(executor.node_def(2).in_edges, ElementsAre(1));
}
// End-to-end run of a two-source DAG with a recording task runner: checks
// both the observed execution order and the final buffer contents (elements
// covered by two thunks get doubled twice).
TEST(ThunkExecutorTest, Execute) {
  BufferAllocation alloc(0, 80, 0);
  BufferAllocation::Slice slice0(&alloc, 0, 40);
  BufferAllocation::Slice slice1(&alloc, 40, 40);
  BufferAllocation::Slice slice2(&alloc, 20, 40);
  std::vector<std::string> trace;
  ThunkSequence sequence;
  sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}, &trace));
  sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}, &trace));
  sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}, &trace));
  TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
                          ThunkExecutor::Create(std::move(sequence)));
  std::vector<int32_t> data(20, 1);  // shared backing store for all slices
  auto buffers = AddI32Thunk::AsDeviceMemory({&data});
  BufferAllocations allocations(buffers);
  Thunk::ExecuteParams params = {nullptr, &allocations};
  auto execute_event = executor.Execute(params, [&](ThunkExecutor::Task task) {
    trace.push_back("<TaskRunner>");
    task();
  });
  tsl::BlockUntilReady(execute_event);
  ASSERT_TRUE(execute_event.IsConcrete());
  EXPECT_THAT(trace, ElementsAre("<TaskRunner>", "b", "a", "c"));
  EXPECT_THAT(data, ElementsAre(2, 2, 2, 2, 2,
                                4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
                                2, 2, 2, 2, 2));
}
// Bundles a randomly generated thunk sequence with its input data, the
// buffers it mutates during the test, and the independently computed
// expected result for verification.
struct GeneratedThunkSequence {
  BufferAllocation src_alloc;
  BufferAllocation dst_alloc;
  std::vector<int32_t> src;       // source data (all ones)
  std::vector<int32_t> dst;       // mutated by the executor under test
  std::vector<int32_t> expected;  // reference result, computed eagerly
  std::vector<MaybeOwningDeviceMemory> expected_buffers;
  std::vector<MaybeOwningDeviceMemory> buffers;
  ThunkSequence sequence;
};
// Generates `num_thunks` AddI32 thunks over random slices of a src and dst
// allocation. The expected result is computed eagerly against a shadow
// buffer so the parallel execution can be verified. The RNG is
// default-seeded, so generation is deterministic across runs.
static absl::StatusOr<std::unique_ptr<GeneratedThunkSequence>>
GenerateThunkSequence(size_t num_elements, size_t num_thunks,
                      bool inject_errors, bool inject_side_effects) {
  auto g = std::make_unique<GeneratedThunkSequence>(GeneratedThunkSequence{
      BufferAllocation(0, num_elements * sizeof(int32_t), 0),
      BufferAllocation(1, num_elements * sizeof(int32_t), 0),
      std::vector<int32_t>(num_elements, 1),
      std::vector<int32_t>(num_elements, 0),
      std::vector<int32_t>(num_elements, 0),
  });
  g->expected_buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->expected});
  g->buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->dst});
  std::minstd_rand0 engine;
  std::uniform_int_distribution<size_t> offset_dist(0, num_elements - 1);
  std::uniform_int_distribution<size_t> size_dist(32, 64);
  // Roughly every 10th thunk fails when error injection is enabled.
  std::uniform_int_distribution<size_t> inject_error_dist(0, num_thunks / 10);
  auto random_slice = [&](BufferAllocation* alloc) {
    size_t start = offset_dist(engine);
    size_t size = std::min(num_elements - start, size_dist(engine));
    return BufferAllocation::Slice(alloc, start * sizeof(int32_t),
                                   size * sizeof(int32_t));
  };
  for (int i = 0; i < num_thunks; ++i) {
    BufferAllocation::Slice src = random_slice(&g->src_alloc);
    BufferAllocation::Slice dst = random_slice(&g->dst_alloc);
    // Apply the same add to the shadow buffers to build the expected result.
    BufferAllocations allocations(g->expected_buffers);
    TF_RETURN_IF_ERROR(AddI32Thunk::Execute(&allocations, src, dst));
    bool inject_error = inject_errors && inject_error_dist(engine) == 0;
    g->sequence.push_back(AddI32Thunk::Create(absl::StrCat(i), {src}, {dst},
                                              nullptr, inject_error,
                                              inject_side_effects));
  }
  return g;
}
// Parameterized stress fixture. Params: (num_thunks, use_task_runner,
// use_device, inject_errors, inject_side_effects). Thread pool and Eigen
// device are created lazily, only when a parameter needs them.
class ThunkExecutorStressTest
    : public testing::TestWithParam<
          std::tuple<int32_t, bool, bool, bool, bool>> {
 public:
  void SetUp() override {
    auto& [_, use_task_runner, use_device, inject_errors, inject_side_effects] =
        GetParam();
    use_task_runner_ = use_task_runner;
    use_device_ = use_device;
    if (use_task_runner_ || use_device_) {
      thread_pool_.emplace(tsl::Env::Default(), "thunk-executor", 8);
      device_.emplace(thread_pool_->AsEigenThreadPool(),
                      thread_pool_->NumThreads());
    }
  }
  // Returns a runner that schedules tasks on the pool, or nullptr (inline).
  ThunkExecutor::TaskRunner task_runner() {
    if (!use_task_runner_) return nullptr;
    return [&](ThunkExecutor::Task task) {
      thread_pool_->Schedule(ToCopyableTask(std::move(task)));
    };
  }
  // Returns the intra-op device, or nullptr when the async path is disabled.
  Eigen::ThreadPoolDevice* device() {
    if (!use_device_) return nullptr;
    return &*device_;
  }
 private:
  bool use_task_runner_;
  bool use_device_;
  std::optional<tsl::thread::ThreadPool> thread_pool_;
  std::optional<Eigen::ThreadPoolDevice> device_;
};
// Runs a randomly generated sequence under every combination of runner /
// device / error / side-effect options and checks either the injected error
// surfaces or the destination matches the precomputed expected buffer.
TEST_P(ThunkExecutorStressTest, Execute) {
  auto [num_thunks, use_task_runner, use_device, inject_errors,
        inject_side_effects] = GetParam();
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<GeneratedThunkSequence> g,
      GenerateThunkSequence(1024, num_thunks, inject_errors,
                            inject_side_effects));
  TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
                          ThunkExecutor::Create(std::move(g->sequence)));
  BufferAllocations allocations(g->buffers);
  Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, device()};
  auto execute_event = executor.Execute(params, task_runner());
  tsl::BlockUntilReady(execute_event);
  if (inject_errors) {
    ASSERT_TRUE(execute_event.IsError());
    EXPECT_EQ(execute_event.GetError(), absl::InternalError("Injected error"));
  } else {
    ASSERT_TRUE(execute_event.IsConcrete());
    EXPECT_EQ(g->dst, g->expected);
  }
}
INSTANTIATE_TEST_SUITE_P(ThunkExecutor, ThunkExecutorStressTest,
testing::Combine(testing::ValuesIn({10, 100, 1000}),
testing::Bool(), testing::Bool(),
testing::Bool(), testing::Bool()));
// Benchmarks fully synchronous execution (no task runner, no device).
static void BM_SyncThunkExecutor(benchmark::State& state) {
  const size_t num_thunks = state.range(0);
  auto g = GenerateThunkSequence(1024, num_thunks,
                                 false,
                                 false)
               .value();
  auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
  BufferAllocations allocations(g->buffers);
  Thunk::ExecuteParams params = {nullptr, &allocations};
  for (auto _ : state) {
    auto execute_event = e.Execute(params, nullptr);
    tsl::BlockUntilReady(execute_event);
    CHECK(execute_event.IsConcrete());
  }
}
// Benchmarks concurrent execution with a thread-pool task runner and an
// Eigen intra-op device (thunks run asynchronously).
static void BM_AsyncThunkExecutor(benchmark::State& state) {
  const size_t num_thunks = state.range(0);
  tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "thunk-executor", 8);
  Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
                                 thread_pool.NumThreads());
  auto g = GenerateThunkSequence(1024, num_thunks,
                                 false,
                                 false)
               .value();
  auto e = ThunkExecutor::Create(std::move(g->sequence)).value();
  BufferAllocations allocations(g->buffers);
  Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, &device};
  for (auto _ : state) {
    auto execute_event = e.Execute(params, [&](ThunkExecutor::Task task) {
      thread_pool.Schedule(ToCopyableTask(std::move(task)));
    });
    tsl::BlockUntilReady(execute_event);
    CHECK(execute_event.IsConcrete());
  }
}
BENCHMARK(BM_SyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
->Arg(258)
->Arg(512);
BENCHMARK(BM_AsyncThunkExecutor)
->MeasureProcessCPUTime()
->Arg(1)
->Arg(16)
->Arg(64)
->Arg(128)
->Arg(258)
->Arg(512);
}
} | #include "xla/service/cpu/runtime/thunk_executor.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
ThunkExecutor::ThunkExecutor(ThunkSequence thunk_sequence,
std::vector<NodeDef> nodes_defs)
: thunk_sequence_(std::move(thunk_sequence)),
nodes_defs_(std::move(nodes_defs)),
is_sequential_(true) {
for (NodeId i = 0; i < nodes_defs_.size(); ++i) {
if (nodes_defs_[i].in_edges.empty()) {
source_.push_back(i);
}
if (nodes_defs_[i].out_edges.empty()) {
sink_.push_back(i);
}
}
int64_t num_erased_edges = TransitiveReduction();
for (NodeId i = 1; i < nodes_defs_.size() && is_sequential_; ++i) {
is_sequential_ &= (absl::c_count(nodes_defs_[i].in_edges, i - 1) != 0);
}
VLOG(2) << absl::StreamFormat(
"Constructed ThunkExecutor with %d nodes: #source_nodes=%d "
"#sink_nodes=%d, #erased_edges=%d, is_sequential=%v",
nodes_defs_.size(), source_.size(), sink_.size(), num_erased_edges,
is_sequential_);
DCHECK((!source_.empty() && !sink_.empty() && !thunk_sequence_.empty()) ||
(source_.empty() && sink_.empty() && thunk_sequence_.empty()));
} | TEST(ThunkExecutorTest, DependencyOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_FALSE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0, 1));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, SequentialOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_TRUE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
}
TEST(ThunkExecutorTest, TransitiveReduction) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_THAT(executor.node_def(0).out_edges, ElementsAre(1));
EXPECT_THAT(executor.node_def(1).in_edges, ElementsAre(0));
EXPECT_THAT(executor.node_def(1).out_edges, ElementsAre(2));
EXPECT_THAT(executor.node_def(2).in_edges, ElementsAre(1));
}
TEST(ThunkExecutorTest, Execute) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
std::vector<std::string> trace;
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}, &trace));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}, &trace));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}, &trace));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence)));
std::vector<int32_t> data(20, 1);
auto buffers = AddI32Thunk::AsDeviceMemory({&data});
BufferAllocations allocations(buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = executor.Execute(params, [&](ThunkExecutor::Task task) {
trace.push_back("<TaskRunner>");
task();
});
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_THAT(trace, ElementsAre("<TaskRunner>", "b", "a", "c"));
EXPECT_THAT(data, ElementsAre(2, 2, 2, 2, 2,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
2, 2, 2, 2, 2));
}
TEST_P(ThunkExecutorStressTest, Execute) {
auto [num_thunks, use_task_runner, use_device, inject_errors,
inject_side_effects] = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GeneratedThunkSequence> g,
GenerateThunkSequence(1024, num_thunks, inject_errors,
inject_side_effects));
TF_ASSERT_OK_AND_ASSIGN(ThunkExecutor executor,
ThunkExecutor::Create(std::move(g->sequence)));
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, device()};
auto execute_event = executor.Execute(params, task_runner());
tsl::BlockUntilReady(execute_event);
if (inject_errors) {
ASSERT_TRUE(execute_event.IsError());
EXPECT_EQ(execute_event.GetError(), absl::InternalError("Injected error"));
} else {
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_EQ(g->dst, g->expected);
}
} |
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace generator {
namespace cpp {
Renderer::Renderer(RendererContext context) : context_(context) {}
// Emits an empty line (no indentation applied).
Renderer& Renderer::BlankLine() {
  context_.code.AddLineWithoutIndent("");
  return *this;
}
// Emits `text` verbatim at column 0 (useful for preprocessor directives).
Renderer& Renderer::CodeLine(const string& text) {
  context_.code.AddLineWithoutIndent(text);
  return *this;
}
// Emits a multi-line snippet verbatim: trims surrounding whitespace, then
// adds each line without applying the current indent level.
Renderer& Renderer::CodeLines(const string& text) {
  StringPiece trimmed_text(text);
  str_util::RemoveWhitespaceContext(&trimmed_text);
  for (const string& line : str_util::Split(trimmed_text, '\n')) {
    context_.code.AddLineWithoutIndent(line);
  }
  return *this;
}
// Emits `text` as an indented statement, appending the terminating ';'.
// A statement that already ends in ';' is emitted unchanged, with a warning
// about the superfluous terminator.
Renderer& Renderer::Statement(const string& text) {
  if (!str_util::EndsWith(text, ";")) {
    context_.code.AddLineWithIndent(absl::StrCat(text, ";"));
  } else {
    LOG(WARNING) << "Superfluous terminating ';' in '" << text << "'";
    context_.code.AddLineWithIndent(text);
  }
  return *this;
}
// Emits `text` wrapped in TF_RETURN_IF_ERROR(...), as a statement.
Renderer& Renderer::TFStatement(const string& text) {
  return Statement(absl::Substitute("TF_RETURN_IF_ERROR($0)", text));
}
// Emits `text` as a "// "-prefixed comment line at the current indent level.
// NOTE: the previous body was garbled (an unterminated `absl::StrCat("` with
// the comment-prefix argument missing, leaving unbalanced parentheses);
// reconstructed to prepend the C++ line-comment marker.
Renderer& Renderer::CommentLine(const string& text) {
  context_.code.AddLineWithIndent(absl::StrCat("// ", text));
  return *this;
}
// Emits `text` followed by " {" and increases the indent for the block body.
Renderer& Renderer::BlockOpen(const string& text) {
  context_.code.AddLineWithIndent(absl::StrCat(text, " {"));
  context_.code.IncreaseIndent();
  return *this;
}
// Decreases the indent and emits the closing "}" with optional trailing
// `text` (e.g. a namespace-closing comment or ";").
Renderer& Renderer::BlockClose(const string& text) {
  context_.code.DecreaseIndent();
  context_.code.AddLineWithIndent(absl::StrCat("}", text));
  return *this;
}
}
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/common/path_config.h"
#include "tensorflow/c/experimental/ops/gen/common/source_code.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
namespace {
TEST(Renderer, typical_usage) {
class TestRenderer : Renderer {
public:
explicit TestRenderer(SourceCode& code)
: Renderer(
{RendererContext::kSource, code, CppConfig(), PathConfig()}) {}
void Render() {
CommentLine("File level comment.");
CodeLine("#include \"header.h\"");
BlankLine();
BlockOpen("void TestFunction()");
{
Statement("int i = 1");
BlankLine();
BlockOpen("while (i == 1)");
{
CommentLine("Do nothing, really....");
CodeLine("#if 0");
Statement("call()");
CodeLine("#endif");
BlockClose();
}
BlockClose("
}
}
};
SourceCode code;
TestRenderer(code).Render();
string expected = R"(
#include "header.h"
void TestFunction() {
int i = 1;
while (i == 1) {
#if 0
call();
#endif
}
}
)";
code.SetSpacesPerIndent(3);
EXPECT_EQ(expected, code.Render());
}
}
}
}
} | Renderer& Renderer::CommentLine(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat("
return *this;
} | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/common/path_config.h"
#include "tensorflow/c/experimental/ops/gen/common/source_code.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
namespace {
TEST(Renderer, typical_usage) {
class TestRenderer : Renderer {
public:
explicit TestRenderer(SourceCode& code)
: Renderer(
{RendererContext::kSource, code, CppConfig(), PathConfig()}) {}
void Render() {
CommentLine("File level comment.");
CodeLine("#include \"header.h\"");
BlankLine();
BlockOpen("void TestFunction()");
{
Statement("int i = 1");
BlankLine();
BlockOpen("while (i == 1)");
{
CommentLine("Do nothing, really....");
CodeLine("#if 0");
Statement("call()");
CodeLine("#endif");
BlockClose();
}
BlockClose("
}
}
};
SourceCode code;
TestRenderer(code).Render();
string expected = R"(
#include "header.h"
void TestFunction() {
int i = 1;
while (i == 1) {
#if 0
call();
#endif
}
}
)";
code.SetSpacesPerIndent(3);
EXPECT_EQ(expected, code.Render());
} |
#include "absl/random/internal/chi_square.h"
#include <cmath>
#include "absl/random/internal/distribution_test_util.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
namespace {
#if defined(__EMSCRIPTEN__)
// Emscripten lacks a usable std::fma; fall back to an unfused multiply-add
// with the same signature (slightly different rounding than a true fma).
inline double fma(double x, double y, double z) {
  return (x * y) + z;
}
#endif
// Evaluates the polynomial whose coefficients are `poly` (lowest degree
// first) at `x`, using Horner's scheme with fused multiply-add.
template <typename T, unsigned N>
inline T EvaluatePolynomial(T x, const T (&poly)[N]) {
#if !defined(__EMSCRIPTEN__)
  using std::fma;
#endif
  // Start from the highest-order coefficient and fold downwards.
  T acc = poly[N - 1];
  for (unsigned k = N - 1; k >= 1; k--) {
    acc = fma(acc, x, poly[k - 1]);
  }
  return acc;
}
static constexpr int kLargeDOF = 150;
// Cumulative probability of the standard normal distribution at `z`
// (probability of a value <= z). Polynomial approximation; appears to follow
// the classic "POZ" algorithm — TODO confirm against the original reference.
double POZ(double z) {
  // Coefficients for |z| < 2 (evaluated in z^2).
  static constexpr double kP1[] = {
      0.797884560593, -0.531923007300, 0.319152932694,
      -0.151968751364, 0.059054035642, -0.019198292004,
      0.005198775019, -0.001075204047, 0.000124818987,
  };
  // Coefficients for 2 <= |z| < 6 (evaluated in |z|/2 - 2).
  static constexpr double kP2[] = {
      0.999936657524, 0.000535310849, -0.002141268741, 0.005353579108,
      -0.009279453341, 0.011630447319, -0.010557625006, 0.006549791214,
      -0.002034254874, -0.000794620820, 0.001390604284, -0.000676904986,
      -0.000019538132, 0.000152529290, -0.000045255659,
  };
  // Beyond |z| >= 6 the tail probability is treated as 0/1.
  const double kZMax = 6.0;
  if (z == 0.0) {
    return 0.5;
  }
  double x;
  double y = 0.5 * std::fabs(z);
  if (y >= (kZMax * 0.5)) {
    x = 1.0;
  } else if (y < 1.0) {
    double w = y * y;
    x = EvaluatePolynomial(w, kP1) * y * 2.0;
  } else {
    y -= 2.0;
    x = EvaluatePolynomial(y, kP2);
  }
  // x approximates P(-z <= Z <= z); fold into the one-sided CDF.
  return z > 0.0 ? ((x + 1.0) * 0.5) : ((1.0 - x) * 0.5);
}
// Approximates the survival function (upper-tail probability) of the
// standard normal distribution via a small polynomial raised to the -4th
// power: 0.5 * r(z)^-4.
double normal_survival(double z) {
  static constexpr double kR[] = {
      1.0, 0.196854, 0.115194, 0.000344, 0.019527,
  };
  double r = EvaluatePolynomial(z, kR);
  r *= r;  // r^2
  return 0.5 / (r * r);  // 0.5 / r^4
}
}
// Returns the chi-square statistic with `dof` degrees of freedom whose
// cumulative probability is `p` — i.e. the (approximate) inverse of
// ChiSquarePValue.
double ChiSquareValue(int dof, double p) {
  static constexpr double kChiEpsilon =
      0.000001;  // Termination accuracy of the bisection search below.
  static constexpr double kChiMax =
      99999.0;  // Search upper bound, also returned for p_value <= 0.
  const double p_value = 1.0 - p;
  if (dof < 1 || p_value > 1.0) {
    return 0.0;
  }
  if (dof > kLargeDOF) {
    // Large dof: use a normal approximation with a cube transform
    // (mean/variance terms match the Wilson–Hilferty approximation —
    // TODO confirm against a statistics reference).
    const double z = InverseNormalSurvival(p_value);
    const double mean = 1 - 2.0 / (9 * dof);
    const double variance = 2.0 / (9 * dof);
    if (variance != 0) {
      double term = z * std::sqrt(variance) + mean;
      return dof * (term * term * term);
    }
  }
  if (p_value <= 0.0) return kChiMax;
  // Bisection: shrink [min_chisq, max_chisq] around the value whose p-value
  // matches `p_value`, using ChiSquarePValue as the oracle.
  double min_chisq = 0.0;
  double max_chisq = kChiMax;
  double current = dof / std::sqrt(p_value);
  while ((max_chisq - min_chisq) > kChiEpsilon) {
    if (ChiSquarePValue(current, dof) < p_value) {
      max_chisq = current;
    } else {
      min_chisq = current;
    }
    current = (max_chisq + min_chisq) * 0.5;
  }
  return current;
}
// Returns the p-value (upper-tail probability) of observing `chi_square`
// under a chi-square distribution with `dof` degrees of freedom.
double ChiSquarePValue(double chi_square, int dof) {
  static constexpr double kLogSqrtPi =
      0.5723649429247000870717135;  // log(sqrt(pi))
  static constexpr double kInverseSqrtPi =
      0.5641895835477562869480795;  // 1 / sqrt(pi)
  if (dof > kLargeDOF) {
    // Large dof: normal approximation with a cube-root transform of the
    // scaled statistic (same mean/variance terms as in ChiSquareValue).
    const double chi_square_scaled = std::pow(chi_square / dof, 1.0 / 3);
    const double mean = 1 - 2.0 / (9 * dof);
    const double variance = 2.0 / (9 * dof);
    if (variance != 0) {
      const double z = (chi_square_scaled - mean) / std::sqrt(variance);
      if (z > 0) {
        return normal_survival(z);
      } else if (z < 0) {
        return 1.0 - normal_survival(-z);
      } else {
        return 0.5;
      }
    }
  }
  if (chi_square <= 0.0) return 1.0;
  if (dof < 1) return 0;
  // exp(x) underflows to ~0 below -20; clamp to avoid denormal noise.
  auto capped_exp = [](double x) { return x < -20 ? 0.0 : std::exp(x); };
  static constexpr double kBigX = 20;
  double a = 0.5 * chi_square;
  const bool even = !(dof & 1);
  const double y = capped_exp(-a);
  // Base term: for even dof, exp(-a); for odd dof, the normal tail at
  // sqrt(chi_square).
  double s = even ? y : (2.0 * POZ(-std::sqrt(chi_square)));
  if (dof <= 2) {
    return s;
  }
  // Series accumulation over half-integer steps up to (dof - 1) / 2.
  // NOTE: `chi_square` is reused as the loop bound from here on.
  chi_square = 0.5 * (dof - 1.0);
  double z = (even ? 1.0 : 0.5);
  if (a > kBigX) {
    // Large argument: accumulate in log space to avoid underflow.
    double e = (even ? 0.0 : kLogSqrtPi);
    double c = std::log(a);
    while (z <= chi_square) {
      e = std::log(z) + e;
      s += capped_exp(c * z - a - e);
      z += 1.0;
    }
    return s;
  }
  // Small argument: accumulate the series directly.
  double e = (even ? 1.0 : (kInverseSqrtPi / std::sqrt(a)));
  double c = 0.0;
  while (z <= chi_square) {
    e = e * (a / z);
    c = c + e;
    z += 1.0;
  }
  return c * y + s;
}
}
ABSL_NAMESPACE_END
} | #include "absl/random/internal/chi_square.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/macros.h"
using absl::random_internal::ChiSquare;
using absl::random_internal::ChiSquarePValue;
using absl::random_internal::ChiSquareValue;
using absl::random_internal::ChiSquareWithExpected;
namespace {
TEST(ChiSquare, Value) {
struct {
int line;
double chi_square;
int df;
double confidence;
} const specs[] = {
{__LINE__, 0, 0, 0.01},
{__LINE__, 0.00016, 1, 0.01},
{__LINE__, 1.64650, 8, 0.01},
{__LINE__, 5.81221, 16, 0.01},
{__LINE__, 156.4319, 200, 0.01},
{__LINE__, 1121.3784, 1234, 0.01},
{__LINE__, 53557.1629, 54321, 0.01},
{__LINE__, 651662.6647, 654321, 0.01},
{__LINE__, 0, 0, 0.99},
{__LINE__, 6.635, 1, 0.99},
{__LINE__, 20.090, 8, 0.99},
{__LINE__, 32.000, 16, 0.99},
{__LINE__, 249.4456, 200, 0.99},
{__LINE__, 1131.1573, 1023, 0.99},
{__LINE__, 1352.5038, 1234, 0.99},
{__LINE__, 55090.7356, 54321, 0.99},
{__LINE__, 656985.1514, 654321, 0.99},
{__LINE__, 16.2659, 3, 0.999},
{__LINE__, 22.4580, 6, 0.999},
{__LINE__, 267.5409, 200, 0.999},
{__LINE__, 1168.5033, 1023, 0.999},
{__LINE__, 55345.1741, 54321, 0.999},
{__LINE__, 657861.7284, 654321, 0.999},
{__LINE__, 51.1772, 24, 0.999},
{__LINE__, 59.7003, 30, 0.999},
{__LINE__, 37.6984, 15, 0.999},
{__LINE__, 29.5898, 10, 0.999},
{__LINE__, 27.8776, 9, 0.999},
{__LINE__, 0.000157088, 1, 0.01},
{__LINE__, 5.31852, 2, 0.93},
{__LINE__, 1.92256, 4, 0.25},
{__LINE__, 10.7709, 13, 0.37},
{__LINE__, 26.2514, 17, 0.93},
{__LINE__, 36.4799, 29, 0.84},
{__LINE__, 25.818, 31, 0.27},
{__LINE__, 63.3346, 64, 0.50},
{__LINE__, 196.211, 128, 0.9999},
{__LINE__, 215.21, 243, 0.10},
{__LINE__, 285.393, 256, 0.90},
{__LINE__, 984.504, 1024, 0.1923},
{__LINE__, 2043.85, 2048, 0.4783},
{__LINE__, 48004.6, 48273, 0.194},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
const double val = ChiSquareValue(spec.df, spec.confidence);
const double err = std::max(5e-6, spec.chi_square / 5e3);
EXPECT_NEAR(spec.chi_square, val, err) << spec.line;
}
EXPECT_NEAR(49.2680, ChiSquareValue(100, 1e-6), 5);
EXPECT_NEAR(123.499, ChiSquareValue(200, 1e-6), 5);
EXPECT_NEAR(149.449, ChiSquareValue(100, 0.999), 0.01);
EXPECT_NEAR(161.318, ChiSquareValue(100, 0.9999), 0.01);
EXPECT_NEAR(172.098, ChiSquareValue(100, 0.99999), 0.01);
EXPECT_NEAR(381.426, ChiSquareValue(300, 0.999), 0.05);
EXPECT_NEAR(399.756, ChiSquareValue(300, 0.9999), 0.1);
EXPECT_NEAR(416.126, ChiSquareValue(300, 0.99999), 0.2);
}
TEST(ChiSquareTest, PValue) {
struct {
int line;
double pval;
double chi_square;
int df;
} static const specs[] = {
{__LINE__, 1, 0, 0},
{__LINE__, 0, 0.001, 0},
{__LINE__, 1.000, 0, 453},
{__LINE__, 0.134471, 7972.52, 7834},
{__LINE__, 0.203922, 28.32, 23},
{__LINE__, 0.737171, 48274, 48472},
{__LINE__, 0.444146, 583.1234, 579},
{__LINE__, 0.294814, 138.2, 130},
{__LINE__, 0.0816532, 12.63, 7},
{__LINE__, 0, 682.32, 67},
{__LINE__, 0.49405, 999, 999},
{__LINE__, 1.000, 0, 9999},
{__LINE__, 0.997477, 0.00001, 1},
{__LINE__, 0, 5823.21, 5040},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
const double pval = ChiSquarePValue(spec.chi_square, spec.df);
EXPECT_NEAR(spec.pval, pval, 1e-3);
}
}
TEST(ChiSquareTest, CalcChiSquare) {
struct {
int line;
std::vector<int> expected;
std::vector<int> actual;
} const specs[] = {
{__LINE__,
{56, 234, 76, 1, 546, 1, 87, 345, 1, 234},
{2, 132, 4, 43, 234, 8, 345, 8, 236, 56}},
{__LINE__,
{123, 36, 234, 367, 345, 2, 456, 567, 234, 567},
{123, 56, 2345, 8, 345, 8, 2345, 23, 48, 267}},
{__LINE__,
{123, 234, 345, 456, 567, 678, 789, 890, 98, 76},
{123, 234, 345, 456, 567, 678, 789, 890, 98, 76}},
{__LINE__, {3, 675, 23, 86, 2, 8, 2}, {456, 675, 23, 86, 23, 65, 2}},
{__LINE__, {1}, {23}},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
double chi_square = 0;
for (int i = 0; i < spec.expected.size(); ++i) {
const double diff = spec.actual[i] - spec.expected[i];
chi_square += (diff * diff) / spec.expected[i];
}
EXPECT_NEAR(chi_square,
ChiSquare(std::begin(spec.actual), std::end(spec.actual),
std::begin(spec.expected), std::end(spec.expected)),
1e-5);
}
}
// Checks ChiSquareWithExpected on int64 samples, once with the computed
// sample mean and once with the same mean passed as an explicit constant;
// the two results must agree.
TEST(ChiSquareTest, CalcChiSquareInt64) {
  const int64_t data[3] = {910293487, 910292491, 910216780};
  double sum = std::accumulate(std::begin(data), std::end(data), double{0});
  size_t n = std::distance(std::begin(data), std::end(data));
  // Expected count = sample mean (sum / 3 == 910267586).
  double a = ChiSquareWithExpected(std::begin(data), std::end(data), sum / n);
  EXPECT_NEAR(4.254101, a, 1e-6);
  // Same mean given directly; must match the computed-mean run above.
  double b =
      ChiSquareWithExpected(std::begin(data), std::end(data), 910267586.0);
  EXPECT_NEAR(4.254101, b, 1e-6);
}
TEST(ChiSquareTest, TableData) {
const double data[100][5] = {
{2.706, 3.841, 5.024, 6.635, 10.828},
{4.605, 5.991, 7.378, 9.210, 13.816},
{6.251, 7.815, 9.348, 11.345, 16.266},
{7.779, 9.488, 11.143, 13.277, 18.467},
{9.236, 11.070, 12.833, 15.086, 20.515},
{10.645, 12.592, 14.449, 16.812, 22.458},
{12.017, 14.067, 16.013, 18.475, 24.322},
{13.362, 15.507, 17.535, 20.090, 26.125},
{14.684, 16.919, 19.023, 21.666, 27.877},
{15.987, 18.307, 20.483, 23.209, 29.588},
{17.275, 19.675, 21.920, 24.725, 31.264},
{18.549, 21.026, 23.337, 26.217, 32.910},
{19.812, 22.362, 24.736, 27.688, 34.528},
{21.064, 23.685, 26.119, 29.141, 36.123},
{22.307, 24.996, 27.488, 30.578, 37.697},
{23.542, 26.296, 28.845, 32.000, 39.252},
{24.769, 27.587, 30.191, 33.409, 40.790},
{25.989, 28.869, 31.526, 34.805, 42.312},
{27.204, 30.144, 32.852, 36.191, 43.820},
{28.412, 31.410, 34.170, 37.566, 45.315},
{29.615, 32.671, 35.479, 38.932, 46.797},
{30.813, 33.924, 36.781, 40.289, 48.268},
{32.007, 35.172, 38.076, 41.638, 49.728},
{33.196, 36.415, 39.364, 42.980, 51.179},
{34.382, 37.652, 40.646, 44.314, 52.620},
{35.563, 38.885, 41.923, 45.642, 54.052},
{36.741, 40.113, 43.195, 46.963, 55.476},
{37.916, 41.337, 44.461, 48.278, 56.892},
{39.087, 42.557, 45.722, 49.588, 58.301},
{40.256, 43.773, 46.979, 50.892, 59.703},
{41.422, 44.985, 48.232, 52.191, 61.098},
{42.585, 46.194, 49.480, 53.486, 62.487},
{43.745, 47.400, 50.725, 54.776, 63.870},
{44.903, 48.602, 51.966, 56.061, 65.247},
{46.059, 49.802, 53.203, 57.342, 66.619},
{47.212, 50.998, 54.437, 58.619, 67.985},
{48.363, 52.192, 55.668, 59.893, 69.347},
{49.513, 53.384, 56.896, 61.162, 70.703},
{50.660, 54.572, 58.120, 62.428, 72.055},
{51.805, 55.758, 59.342, 63.691, 73.402},
{52.949, 56.942, 60.561, 64.950, 74.745},
{54.090, 58.124, 61.777, 66.206, 76.084},
{55.230, 59.304, 62.990, 67.459, 77.419},
{56.369, 60.481, 64.201, 68.710, 78.750},
{57.505, 61.656, 65.410, 69.957, 80.077},
{58.641, 62.830, 66.617, 71.201, 81.400},
{59.774, 64.001, 67.821, 72.443, 82.720},
{60.907, 65.171, 69.023, 73.683, 84.037},
{62.038, 66.339, 70.222, 74.919, 85.351},
{63.167, 67.505, 71.420, 76.154, 86.661},
{64.295, 68.669, 72.616, 77.386, 87.968},
{65.422, 69.832, 73.810, 78.616, 89.272},
{66.548, 70.993, 75.002, 79.843, 90.573},
{67.673, 72.153, 76.192, 81.069, 91.872},
{68.796, 73.311, 77.380, 82.292, 93.168},
{69.919, 74.468, 78.567, 83.513, 94.461},
{71.040, 75.624, 79.752, 84.733, 95.751},
{72.160, 76.778, 80.936, 85.950, 97.039},
{73.279, 77.931, 82.117, 87.166, 98.324},
{74.397, 79.082, 83.298, 88.379, 99.607},
{75.514, 80.232, 84.476, 89.591, 100.888},
{76.630, 81.381, 85.654, 90.802, 102.166},
{77.745, 82.529, 86.830, 92.010, 103.442},
{78.860, 83.675, 88.004, 93.217, 104.716},
{79.973, 84.821, 89.177, 94.422, 105.988},
{81.085, 85.965, 90.349, 95.626, 107.258},
{82.197, 87.108, 91.519, 96.828, 108.526},
{83.308, 88.250, 92.689, 98.028, 109.791},
{84.418, 89.391, 93.856, 99.228, 111.055},
{85.527, 90.531, 95.023, 100.425, 112.317},
{86.635, 91.670, 96.189, 101.621, 113.577},
{87.743, 92.808, 97.353, 102.816, 114.835},
{88.850, 93.945, 98.516, 104.010, 116.092},
{89.956, 95.081, 99.678, 105.202, 117.346},
{91.061, 96.217, 100.839, 106.393, 118.599},
{92.166, 97.351, 101.999, 107.583, 119.850},
{93.270, 98.484, 103.158, 108.771, 121.100},
{94.374, 99.617, 104.316, 109.958, 122.348},
{95.476, 100.749, 105.473, 111.144, 123.594},
{96.578, 101.879, 106.629, 112.329, 124.839},
{97.680, 103.010, 107.783, 113.512, 126.083},
{98.780, 104.139, 108.937, 114.695, 127.324},
{99.880, 105.267, 110.090, 115.876, 128.565},
{100.980, 106.395, 111.242, 117.057, 129.804},
{102.079, 107.522, 112.393, 118.236, 131.041},
{103.177, 108.648, 113.544, 119.414, 132.277},
{104.275, 109.773, 114.693, 120.591, 133.512},
{105.372, 110.898, 115.841, 121.767, 134.746},
{106.469, 112.022, 116.989, 122.942, 135.978},
{107.565, 113.145, 118.136, 124.116, 137.208},
{108.661, 114.268, 119.282, 125.289, 138.438},
{109.756, 115.390, 120.427, 126.462, 139.666},
{110.850, 116.511, 121.571, 127.633, 140.893},
{111.944, 117.632, 122.715, 128.803, 142.119},
{113.038, 118.752, 123.858, 129.973, 143.344},
{114.131, 119.871, 125.000, 131.141, 144.567},
{115.223, 120.990, 126.141, 132.309, 145.789},
{116.315, 122.108, 127.282, 133.476, 147.010},
{117.407, 123.225, 128.422, 134.642, 148.230},
{118.498, 124.342, 129.561, 135.807, 149.449}
};
for (int i = 0; i < ABSL_ARRAYSIZE(data); i++) {
const double E = 0.0001;
EXPECT_NEAR(ChiSquarePValue(data[i][0], i + 1), 0.10, E)
<< i << " " << data[i][0];
EXPECT_NEAR(ChiSquarePValue(data[i][1], i + 1), 0.05, E)
<< i << " " << data[i][1];
EXPECT_NEAR(ChiSquarePValue(data[i][2], i + 1), 0.025, E)
<< i << " " << data[i][2];
EXPECT_NEAR(ChiSquarePValue(data[i][3], i + 1), 0.01, E)
<< i << " " << data[i][3];
EXPECT_NEAR(ChiSquarePValue(data[i][4], i + 1), 0.001, E)
<< i << " " << data[i][4];
const double F = 0.1;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.90), data[i][0], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.95), data[i][1], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.975), data[i][2], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.99), data[i][3], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.999), data[i][4], F) << i;
}
}
TEST(ChiSquareTest, ChiSquareTwoIterator) {
const int counts[10] = {6, 6, 18, 33, 38, 38, 28, 21, 9, 3};
const double expected[10] = {4.6, 8.8, 18.4, 30.0, 38.2,
38.2, 30.0, 18.4, 8.8, 4.6};
double chi_square = ChiSquare(std::begin(counts), std::end(counts),
std::begin(expected), std::end(expected));
EXPECT_NEAR(chi_square, 2.69, 0.001);
const int dof = 7;
double p_value_05 = ChiSquarePValue(14.067, dof);
EXPECT_NEAR(p_value_05, 0.05, 0.001);
double p_actual = ChiSquarePValue(chi_square, dof);
EXPECT_GT(p_actual, 0.05);
}
TEST(ChiSquareTest, DiceRolls) {
const int rolls[6] = {22, 11, 17, 14, 20, 18};
double sum = std::accumulate(std::begin(rolls), std::end(rolls), double{0});
size_t n = std::distance(std::begin(rolls), std::end(rolls));
double a = ChiSquareWithExpected(std::begin(rolls), std::end(rolls), sum / n);
EXPECT_NEAR(a, 4.70588, 1e-5);
EXPECT_LT(a, ChiSquareValue(4, 0.95));
double p_a = ChiSquarePValue(a, 4);
EXPECT_NEAR(p_a, 0.318828, 1e-5);
double b = ChiSquareWithExpected(std::begin(rolls), std::end(rolls), 17.0);
EXPECT_NEAR(b, 4.70588, 1e-5);
EXPECT_LT(b, ChiSquareValue(5, 0.95));
double p_b = ChiSquarePValue(b, 5);
EXPECT_NEAR(p_b, 0.4528180, 1e-5);
}
} | double ChiSquareValue(int dof, double p) {
static constexpr double kChiEpsilon =
0.000001;
static constexpr double kChiMax =
99999.0;
const double p_value = 1.0 - p;
if (dof < 1 || p_value > 1.0) {
return 0.0;
}
if (dof > kLargeDOF) {
const double z = InverseNormalSurvival(p_value);
const double mean = 1 - 2.0 / (9 * dof);
const double variance = 2.0 / (9 * dof);
if (variance != 0) {
double term = z * std::sqrt(variance) + mean;
return dof * (term * term * term);
}
}
if (p_value <= 0.0) return kChiMax;
double min_chisq = 0.0;
double max_chisq = kChiMax;
double current = dof / std::sqrt(p_value);
while ((max_chisq - min_chisq) > kChiEpsilon) {
if (ChiSquarePValue(current, dof) < p_value) {
max_chisq = current;
} else {
min_chisq = current;
}
current = (max_chisq + min_chisq) * 0.5;
}
return current;
} | namespace {
TEST(ChiSquare, Value) {
struct {
int line;
double chi_square;
int df;
double confidence;
} const specs[] = {
{__LINE__, 0, 0, 0.01},
{__LINE__, 0.00016, 1, 0.01},
{__LINE__, 1.64650, 8, 0.01},
{__LINE__, 5.81221, 16, 0.01},
{__LINE__, 156.4319, 200, 0.01},
{__LINE__, 1121.3784, 1234, 0.01},
{__LINE__, 53557.1629, 54321, 0.01},
{__LINE__, 651662.6647, 654321, 0.01},
{__LINE__, 0, 0, 0.99},
{__LINE__, 6.635, 1, 0.99},
{__LINE__, 20.090, 8, 0.99},
{__LINE__, 32.000, 16, 0.99},
{__LINE__, 249.4456, 200, 0.99},
{__LINE__, 1131.1573, 1023, 0.99},
{__LINE__, 1352.5038, 1234, 0.99},
{__LINE__, 55090.7356, 54321, 0.99},
{__LINE__, 656985.1514, 654321, 0.99},
{__LINE__, 16.2659, 3, 0.999},
{__LINE__, 22.4580, 6, 0.999},
{__LINE__, 267.5409, 200, 0.999},
{__LINE__, 1168.5033, 1023, 0.999},
{__LINE__, 55345.1741, 54321, 0.999},
{__LINE__, 657861.7284, 654321, 0.999},
{__LINE__, 51.1772, 24, 0.999},
{__LINE__, 59.7003, 30, 0.999},
{__LINE__, 37.6984, 15, 0.999},
{__LINE__, 29.5898, 10, 0.999},
{__LINE__, 27.8776, 9, 0.999},
{__LINE__, 0.000157088, 1, 0.01},
{__LINE__, 5.31852, 2, 0.93},
{__LINE__, 1.92256, 4, 0.25},
{__LINE__, 10.7709, 13, 0.37},
{__LINE__, 26.2514, 17, 0.93},
{__LINE__, 36.4799, 29, 0.84},
{__LINE__, 25.818, 31, 0.27},
{__LINE__, 63.3346, 64, 0.50},
{__LINE__, 196.211, 128, 0.9999},
{__LINE__, 215.21, 243, 0.10},
{__LINE__, 285.393, 256, 0.90},
{__LINE__, 984.504, 1024, 0.1923},
{__LINE__, 2043.85, 2048, 0.4783},
{__LINE__, 48004.6, 48273, 0.194},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
const double val = ChiSquareValue(spec.df, spec.confidence);
const double err = std::max(5e-6, spec.chi_square / 5e3);
EXPECT_NEAR(spec.chi_square, val, err) << spec.line;
}
EXPECT_NEAR(49.2680, ChiSquareValue(100, 1e-6), 5);
EXPECT_NEAR(123.499, ChiSquareValue(200, 1e-6), 5);
EXPECT_NEAR(149.449, ChiSquareValue(100, 0.999), 0.01);
EXPECT_NEAR(161.318, ChiSquareValue(100, 0.9999), 0.01);
EXPECT_NEAR(172.098, ChiSquareValue(100, 0.99999), 0.01);
EXPECT_NEAR(381.426, ChiSquareValue(300, 0.999), 0.05);
EXPECT_NEAR(399.756, ChiSquareValue(300, 0.9999), 0.1);
EXPECT_NEAR(416.126, ChiSquareValue(300, 0.99999), 0.2);
}
TEST(ChiSquareTest, TableData) {
const double data[100][5] = {
{2.706, 3.841, 5.024, 6.635, 10.828},
{4.605, 5.991, 7.378, 9.210, 13.816},
{6.251, 7.815, 9.348, 11.345, 16.266},
{7.779, 9.488, 11.143, 13.277, 18.467},
{9.236, 11.070, 12.833, 15.086, 20.515},
{10.645, 12.592, 14.449, 16.812, 22.458},
{12.017, 14.067, 16.013, 18.475, 24.322},
{13.362, 15.507, 17.535, 20.090, 26.125},
{14.684, 16.919, 19.023, 21.666, 27.877},
{15.987, 18.307, 20.483, 23.209, 29.588},
{17.275, 19.675, 21.920, 24.725, 31.264},
{18.549, 21.026, 23.337, 26.217, 32.910},
{19.812, 22.362, 24.736, 27.688, 34.528},
{21.064, 23.685, 26.119, 29.141, 36.123},
{22.307, 24.996, 27.488, 30.578, 37.697},
{23.542, 26.296, 28.845, 32.000, 39.252},
{24.769, 27.587, 30.191, 33.409, 40.790},
{25.989, 28.869, 31.526, 34.805, 42.312},
{27.204, 30.144, 32.852, 36.191, 43.820},
{28.412, 31.410, 34.170, 37.566, 45.315},
{29.615, 32.671, 35.479, 38.932, 46.797},
{30.813, 33.924, 36.781, 40.289, 48.268},
{32.007, 35.172, 38.076, 41.638, 49.728},
{33.196, 36.415, 39.364, 42.980, 51.179},
{34.382, 37.652, 40.646, 44.314, 52.620},
{35.563, 38.885, 41.923, 45.642, 54.052},
{36.741, 40.113, 43.195, 46.963, 55.476},
{37.916, 41.337, 44.461, 48.278, 56.892},
{39.087, 42.557, 45.722, 49.588, 58.301},
{40.256, 43.773, 46.979, 50.892, 59.703},
{41.422, 44.985, 48.232, 52.191, 61.098},
{42.585, 46.194, 49.480, 53.486, 62.487},
{43.745, 47.400, 50.725, 54.776, 63.870},
{44.903, 48.602, 51.966, 56.061, 65.247},
{46.059, 49.802, 53.203, 57.342, 66.619},
{47.212, 50.998, 54.437, 58.619, 67.985},
{48.363, 52.192, 55.668, 59.893, 69.347},
{49.513, 53.384, 56.896, 61.162, 70.703},
{50.660, 54.572, 58.120, 62.428, 72.055},
{51.805, 55.758, 59.342, 63.691, 73.402},
{52.949, 56.942, 60.561, 64.950, 74.745},
{54.090, 58.124, 61.777, 66.206, 76.084},
{55.230, 59.304, 62.990, 67.459, 77.419},
{56.369, 60.481, 64.201, 68.710, 78.750},
{57.505, 61.656, 65.410, 69.957, 80.077},
{58.641, 62.830, 66.617, 71.201, 81.400},
{59.774, 64.001, 67.821, 72.443, 82.720},
{60.907, 65.171, 69.023, 73.683, 84.037},
{62.038, 66.339, 70.222, 74.919, 85.351},
{63.167, 67.505, 71.420, 76.154, 86.661},
{64.295, 68.669, 72.616, 77.386, 87.968},
{65.422, 69.832, 73.810, 78.616, 89.272},
{66.548, 70.993, 75.002, 79.843, 90.573},
{67.673, 72.153, 76.192, 81.069, 91.872},
{68.796, 73.311, 77.380, 82.292, 93.168},
{69.919, 74.468, 78.567, 83.513, 94.461},
{71.040, 75.624, 79.752, 84.733, 95.751},
{72.160, 76.778, 80.936, 85.950, 97.039},
{73.279, 77.931, 82.117, 87.166, 98.324},
{74.397, 79.082, 83.298, 88.379, 99.607},
{75.514, 80.232, 84.476, 89.591, 100.888},
{76.630, 81.381, 85.654, 90.802, 102.166},
{77.745, 82.529, 86.830, 92.010, 103.442},
{78.860, 83.675, 88.004, 93.217, 104.716},
{79.973, 84.821, 89.177, 94.422, 105.988},
{81.085, 85.965, 90.349, 95.626, 107.258},
{82.197, 87.108, 91.519, 96.828, 108.526},
{83.308, 88.250, 92.689, 98.028, 109.791},
{84.418, 89.391, 93.856, 99.228, 111.055},
{85.527, 90.531, 95.023, 100.425, 112.317},
{86.635, 91.670, 96.189, 101.621, 113.577},
{87.743, 92.808, 97.353, 102.816, 114.835},
{88.850, 93.945, 98.516, 104.010, 116.092},
{89.956, 95.081, 99.678, 105.202, 117.346},
{91.061, 96.217, 100.839, 106.393, 118.599},
{92.166, 97.351, 101.999, 107.583, 119.850},
{93.270, 98.484, 103.158, 108.771, 121.100},
{94.374, 99.617, 104.316, 109.958, 122.348},
{95.476, 100.749, 105.473, 111.144, 123.594},
{96.578, 101.879, 106.629, 112.329, 124.839},
{97.680, 103.010, 107.783, 113.512, 126.083},
{98.780, 104.139, 108.937, 114.695, 127.324},
{99.880, 105.267, 110.090, 115.876, 128.565},
{100.980, 106.395, 111.242, 117.057, 129.804},
{102.079, 107.522, 112.393, 118.236, 131.041},
{103.177, 108.648, 113.544, 119.414, 132.277},
{104.275, 109.773, 114.693, 120.591, 133.512},
{105.372, 110.898, 115.841, 121.767, 134.746},
{106.469, 112.022, 116.989, 122.942, 135.978},
{107.565, 113.145, 118.136, 124.116, 137.208},
{108.661, 114.268, 119.282, 125.289, 138.438},
{109.756, 115.390, 120.427, 126.462, 139.666},
{110.850, 116.511, 121.571, 127.633, 140.893},
{111.944, 117.632, 122.715, 128.803, 142.119},
{113.038, 118.752, 123.858, 129.973, 143.344},
{114.131, 119.871, 125.000, 131.141, 144.567},
{115.223, 120.990, 126.141, 132.309, 145.789},
{116.315, 122.108, 127.282, 133.476, 147.010},
{117.407, 123.225, 128.422, 134.642, 148.230},
{118.498, 124.342, 129.561, 135.807, 149.449}
};
for (int i = 0; i < ABSL_ARRAYSIZE(data); i++) {
const double E = 0.0001;
EXPECT_NEAR(ChiSquarePValue(data[i][0], i + 1), 0.10, E)
<< i << " " << data[i][0];
EXPECT_NEAR(ChiSquarePValue(data[i][1], i + 1), 0.05, E)
<< i << " " << data[i][1];
EXPECT_NEAR(ChiSquarePValue(data[i][2], i + 1), 0.025, E)
<< i << " " << data[i][2];
EXPECT_NEAR(ChiSquarePValue(data[i][3], i + 1), 0.01, E)
<< i << " " << data[i][3];
EXPECT_NEAR(ChiSquarePValue(data[i][4], i + 1), 0.001, E)
<< i << " " << data[i][4];
const double F = 0.1;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.90), data[i][0], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.95), data[i][1], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.975), data[i][2], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.99), data[i][3], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.999), data[i][4], F) << i;
}
} |
#include "xla/tsl/distributed_runtime/call_options.h"
#include <utility>
#include "tsl/platform/mutex.h"
namespace tsl {
// Defaulted constructor; members take the initializers declared in the header.
CallOptions::CallOptions() = default;
// Invokes the registered cancellation callback, if one is set; a no-op
// otherwise.
void CallOptions::StartCancel() {
  mutex_lock l(mu_);
  if (cancel_func_ != nullptr) {
    // NOTE(review): the callback runs while mu_ is held, so it must not
    // call back into this CallOptions (e.g. SetCancelCallback) — confirm
    // mutex_lock is non-reentrant for this mutex type.
    cancel_func_();
  }
}
// Installs (or replaces) the callback that StartCancel() will invoke.
void CallOptions::SetCancelCallback(CancelFunction cancel_func) {
  mutex_lock l(mu_);
  cancel_func_ = std::move(cancel_func);
}
// Removes any registered cancellation callback; subsequent StartCancel()
// calls become no-ops until a new callback is installed.
void CallOptions::ClearCancelCallback() {
  mutex_lock l(mu_);
  cancel_func_ = nullptr;
}
// Returns the call timeout in milliseconds (reads timeout_in_ms_ under mu_).
int64_t CallOptions::GetTimeout() {
  mutex_lock l(mu_);
  return timeout_in_ms_;
}
// Sets the call timeout, in milliseconds.
void CallOptions::SetTimeout(int64_t ms) {
  mutex_lock l(mu_);
  timeout_in_ms_ = ms;
}
} | #include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Verifies StartCancel() is a no-op before a callback is set, invokes the
// callback once per call while one is set, and stops counting after
// ClearCancelCallback().
TEST(CallOptions, Cancel) {
  int num_calls = 0;
  CallOptions opts;
  opts.StartCancel();  // no callback yet — must not crash or count
  EXPECT_EQ(num_calls, 0);
  opts.SetCancelCallback([&num_calls]() { num_calls++; });
  EXPECT_EQ(num_calls, 0);  // setting the callback does not invoke it
  opts.StartCancel();
  EXPECT_EQ(num_calls, 1);
  opts.StartCancel();  // each StartCancel() fires the callback again
  EXPECT_EQ(num_calls, 2);
  opts.ClearCancelCallback();
  EXPECT_EQ(num_calls, 2);  // clearing does not invoke the callback
  opts.StartCancel();
  EXPECT_EQ(num_calls, 2);  // callback cleared — no further counting
}
} | void CallOptions::ClearCancelCallback() {
mutex_lock l(mu_);
cancel_func_ = nullptr;
} | #include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CallOptions, Cancel) {
int num_calls = 0;
CallOptions opts;
opts.StartCancel();
EXPECT_EQ(num_calls, 0);
opts.SetCancelCallback([&num_calls]() { num_calls++; });
EXPECT_EQ(num_calls, 0);
opts.StartCancel();
EXPECT_EQ(num_calls, 1);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
opts.ClearCancelCallback();
EXPECT_EQ(num_calls, 2);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
} |
#include "tsl/lib/monitoring/collection_registry.h"
#ifndef IS_MOBILE_PLATFORM
#include "tsl/platform/logging.h"
namespace tsl {
namespace monitoring {
namespace internal {
// Runs the metric's registered collection function, passing it a getter
// bound to this collector so the function can export its current values.
void Collector::CollectMetricValues(
    const CollectionRegistry::CollectionInfo& info) {
  info.collection_function(MetricCollectorGetter(
      this, info.metric_def, info.registration_time_millis));
}
// Transfers ownership of the accumulated metrics to the caller; the
// collector's internal store is left moved-from afterwards.
std::unique_ptr<CollectedMetrics> Collector::ConsumeCollectedMetrics() {
  mutex_lock l(mu_);
  return std::move(collected_metrics_);
}
// Copies the static description of `metric_def` (name, description, label
// names, kind, value type) into this collector's descriptor map.
void Collector::CollectMetricDescriptor(
    const AbstractMetricDef* const metric_def) {
  // Insert (or find) the map slot under the lock via an immediately-invoked
  // lambda; the returned raw pointer is then populated after the lock is
  // released.
  auto* const metric_descriptor = [&]() {
    mutex_lock l(mu_);
    return collected_metrics_->metric_descriptor_map
        .insert(std::make_pair(
            string(metric_def->name()),
            std::unique_ptr<MetricDescriptor>(new MetricDescriptor())))
        .first->second.get();
  }();
  metric_descriptor->name = string(metric_def->name());
  metric_descriptor->description = string(metric_def->description());
  for (const StringPiece label_name : metric_def->label_descriptions()) {
    metric_descriptor->label_names.emplace_back(label_name);
  }
  metric_descriptor->metric_kind = metric_def->kind();
  metric_descriptor->value_type = metric_def->value_type();
}
}
// Returns the process-wide registry singleton; constructed on first use and
// never destroyed.
CollectionRegistry* CollectionRegistry::Default() {
  static CollectionRegistry* default_registry =
      new CollectionRegistry(Env::Default());
  return default_registry;
}
// The Env is retained only for its clock (NowMicros) — see Register().
CollectionRegistry::CollectionRegistry(Env* const env) : env_(env) {}
// Registers `metric_def` together with the function that collects its
// values. If a metric with the same name is already registered, the old
// entry is dropped with a warning and replaced. The returned handle is
// presumably what unregisters the metric on destruction (see
// RegistrationHandle in the header — confirm).
std::unique_ptr<CollectionRegistry::RegistrationHandle>
CollectionRegistry::Register(const AbstractMetricDef* const metric_def,
                             const CollectionFunction& collection_function) {
  CHECK(collection_function)
      << "Requires collection_function to contain an implementation.";
  mutex_lock l(mu_);
  const auto found_it = registry_.find(metric_def->name());
  if (found_it != registry_.end()) {
    LOG(WARNING)
        << "Trying to register 2 metrics with the same name: "
        << metric_def->name()
        << ". The old value will be erased in order to register a new one. "
           "Please check if you link the metric more than once, or "
           "if the name is already used by other metrics.";
    registry_.erase(found_it);
  }
  registry_.insert(
      {metric_def->name(),
       // Registration time recorded in milliseconds (NowMicros / 1000).
       {metric_def, collection_function, env_->NowMicros() / 1000}});
  return std::unique_ptr<RegistrationHandle>(
      new RegistrationHandle(this, metric_def));
}
// Removes the metric's entry from the registry, keyed by its name.
void CollectionRegistry::Unregister(const AbstractMetricDef* const metric_def) {
  mutex_lock l(mu_);
  registry_.erase(metric_def->name());
}
// Takes a snapshot of every registered metric. Values are always collected;
// descriptors only when options.collect_metric_descriptors is set.
// NOTE(review): mu_ is held for the whole sweep, so collection functions
// presumably must not re-enter the registry — confirm against callers.
std::unique_ptr<CollectedMetrics> CollectionRegistry::CollectMetrics(
    const CollectMetricsOptions& options) const {
  // Collection timestamp in milliseconds (NowMicros / 1000).
  internal::Collector collector(env_->NowMicros() / 1000);
  mutex_lock l(mu_);
  for (const auto& registration : registry_) {
    if (options.collect_metric_descriptors) {
      collector.CollectMetricDescriptor(registration.second.metric_def);
    }
    collector.CollectMetricValues(registration.second );
  }
  return collector.ConsumeCollectedMetrics();
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/collection_registry.h"
#include <memory>
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
using histogram::Histogram;
namespace test_util {
class CollectionRegistryTestAccess {
public:
static std::unique_ptr<CollectionRegistry> CreateRegistry(Env* const env) {
return std::unique_ptr<CollectionRegistry>(new CollectionRegistry(env));
}
};
}
namespace {
void EmptyCollectionFunction(MetricCollectorGetter getter) {}
TEST(CollectionRegistryTest, RegistrationUnregistration) {
auto* collection_registry = CollectionRegistry::Default();
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def0(
"/tensorflow/metric0", "An example metric with no labels.");
const MetricDef<MetricKind::kGauge, HistogramProto, 1> metric_def1(
"/tensorflow/metric1", "An example metric with one label.", "LabelName");
{
std::unique_ptr<CollectionRegistry::RegistrationHandle> handle0 =
collection_registry->Register(&metric_def0, EmptyCollectionFunction);
std::unique_ptr<CollectionRegistry::RegistrationHandle> handle1 =
collection_registry->Register(&metric_def1, EmptyCollectionFunction);
handle0.reset();
handle0 =
collection_registry->Register(&metric_def0, EmptyCollectionFunction);
}
}
TEST(CollectionRegistryDeathTest, DuplicateRegistration) {
auto* collection_registry = CollectionRegistry::Default();
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def(
"/tensorflow/metric", "An example metric with no labels.");
auto handle =
collection_registry->Register(&metric_def, EmptyCollectionFunction);
auto duplicate_handle =
collection_registry->Register(&metric_def, EmptyCollectionFunction);
EXPECT_NE(duplicate_handle, nullptr);
}
TEST(CollectMetricsTest, Counter) {
auto counter_with_labels = std::unique_ptr<Counter<2>>(
Counter<2>::New("/tensorflow/test/counter_with_labels",
"Counter with labels.", "MyLabel0", "MyLabel1"));
auto counter_without_labels = std::unique_ptr<Counter<0>>(Counter<0>::New(
"/tensorflow/test/counter_without_labels", "Counter without labels."));
counter_with_labels->GetCell("Label00", "Label10")->IncrementBy(42);
counter_with_labels->GetCell("Label01", "Label11")->IncrementBy(58);
counter_without_labels->GetCell()->IncrementBy(7);
for (const bool collect_metric_descriptors : {true, false}) {
SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
collect_metric_descriptors));
auto* collection_registry = CollectionRegistry::Default();
CollectionRegistry::CollectMetricsOptions options;
options.collect_metric_descriptors = collect_metric_descriptors;
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics(options);
if (collect_metric_descriptors) {
ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/counter_with_labels");
EXPECT_EQ("/tensorflow/test/counter_with_labels", ld.name);
EXPECT_EQ("Counter with labels.", ld.description);
ASSERT_EQ(2, ld.label_names.size());
EXPECT_EQ("MyLabel0", ld.label_names[0]);
EXPECT_EQ("MyLabel1", ld.label_names[1]);
EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
EXPECT_EQ(ValueType::kInt64, ld.value_type);
const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/counter_without_labels");
EXPECT_EQ("/tensorflow/test/counter_without_labels", ud.name);
EXPECT_EQ("Counter without labels.", ud.description);
ASSERT_EQ(0, ud.label_names.size());
EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
EXPECT_EQ(ValueType::kInt64, ud.value_type);
} else {
EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
}
ASSERT_GE(collected_metrics->point_set_map.size(), 2);
const PointSet& lps = *collected_metrics->point_set_map.at(
"/tensorflow/test/counter_with_labels");
EXPECT_EQ("/tensorflow/test/counter_with_labels", lps.metric_name);
ASSERT_EQ(2, lps.points.size());
ASSERT_EQ(2, lps.points[0]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
EXPECT_EQ(ValueType::kInt64, lps.points[0]->value_type);
EXPECT_EQ(42, lps.points[0]->int64_value);
EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
EXPECT_GE(lps.points[0]->end_timestamp_millis,
lps.points[0]->start_timestamp_millis);
ASSERT_EQ(2, lps.points[1]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
EXPECT_EQ(ValueType::kInt64, lps.points[1]->value_type);
EXPECT_EQ(58, lps.points[1]->int64_value);
EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
EXPECT_GE(lps.points[1]->end_timestamp_millis,
lps.points[1]->start_timestamp_millis);
const PointSet& ups = *collected_metrics->point_set_map.at(
"/tensorflow/test/counter_without_labels");
EXPECT_EQ("/tensorflow/test/counter_without_labels", ups.metric_name);
ASSERT_EQ(1, ups.points.size());
EXPECT_EQ(0, ups.points[0]->labels.size());
EXPECT_EQ(ValueType::kInt64, ups.points[0]->value_type);
EXPECT_EQ(7, ups.points[0]->int64_value);
EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
EXPECT_GE(ups.points[0]->end_timestamp_millis,
ups.points[0]->start_timestamp_millis);
}
}
// Verifies that CollectMetrics() reports correct descriptors and point sets
// for a labeled string gauge and an unlabeled integer gauge.
// Fix: renamed the misspelled local `inteter_gauge_without_labels` to
// `integer_gauge_without_labels` (matches the metric name it registers).
TEST(CollectMetricsTest, Gauge) {
  auto string_gauge_with_labels =
      std::unique_ptr<Gauge<string, 2>>(Gauge<string, 2>::New(
          "/tensorflow/test/string_gauge_with_labels",
          "String gauge with labels.", "MyLabel0", "MyLabel1"));
  auto integer_gauge_without_labels = std::unique_ptr<Gauge<int64_t, 0>>(
      Gauge<int64_t, 0>::New("/tensorflow/test/integer_gauge_without_labels",
                             "Integer gauge without labels."));
  string_gauge_with_labels->GetCell("Label00", "Label10")->Set("test1");
  string_gauge_with_labels->GetCell("Label01", "Label11")->Set("test2");
  integer_gauge_without_labels->GetCell()->Set(7);
  // Collection must behave identically whether or not descriptors are asked
  // for; only the descriptor map contents differ.
  for (const bool collect_metric_descriptors : {true, false}) {
    SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
                                 collect_metric_descriptors));
    auto* collection_registry = CollectionRegistry::Default();
    CollectionRegistry::CollectMetricsOptions options;
    options.collect_metric_descriptors = collect_metric_descriptors;
    const std::unique_ptr<CollectedMetrics> collected_metrics =
        collection_registry->CollectMetrics(options);
    if (collect_metric_descriptors) {
      // Descriptor of the labeled string gauge.
      ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
      const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/string_gauge_with_labels");
      EXPECT_EQ("/tensorflow/test/string_gauge_with_labels", ld.name);
      EXPECT_EQ("String gauge with labels.", ld.description);
      ASSERT_EQ(2, ld.label_names.size());
      EXPECT_EQ("MyLabel0", ld.label_names[0]);
      EXPECT_EQ("MyLabel1", ld.label_names[1]);
      EXPECT_EQ(MetricKind::kGauge, ld.metric_kind);
      EXPECT_EQ(ValueType::kString, ld.value_type);
      // Descriptor of the unlabeled integer gauge.
      const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/integer_gauge_without_labels");
      EXPECT_EQ("/tensorflow/test/integer_gauge_without_labels", ud.name);
      EXPECT_EQ("Integer gauge without labels.", ud.description);
      ASSERT_EQ(0, ud.label_names.size());
      EXPECT_EQ(MetricKind::kGauge, ud.metric_kind);
      EXPECT_EQ(ValueType::kInt64, ud.value_type);
    } else {
      EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
    }
    // Point sets are collected regardless of the descriptor option.
    ASSERT_GE(collected_metrics->point_set_map.size(), 2);
    const PointSet& lps = *collected_metrics->point_set_map.at(
        "/tensorflow/test/string_gauge_with_labels");
    EXPECT_EQ("/tensorflow/test/string_gauge_with_labels", lps.metric_name);
    ASSERT_EQ(2, lps.points.size());
    ASSERT_EQ(2, lps.points[0]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
    EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
    EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
    EXPECT_EQ(ValueType::kString, lps.points[0]->value_type);
    EXPECT_EQ("test1", lps.points[0]->string_value);
    EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
    EXPECT_GE(lps.points[0]->end_timestamp_millis,
              lps.points[0]->start_timestamp_millis);
    ASSERT_EQ(2, lps.points[1]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
    EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
    EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
    EXPECT_EQ(ValueType::kString, lps.points[1]->value_type);
    EXPECT_EQ("test2", lps.points[1]->string_value);
    EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
    EXPECT_GE(lps.points[1]->end_timestamp_millis,
              lps.points[1]->start_timestamp_millis);
    const PointSet& ups = *collected_metrics->point_set_map.at(
        "/tensorflow/test/integer_gauge_without_labels");
    EXPECT_EQ("/tensorflow/test/integer_gauge_without_labels", ups.metric_name);
    ASSERT_EQ(1, ups.points.size());
    EXPECT_EQ(0, ups.points[0]->labels.size());
    EXPECT_EQ(ValueType::kInt64, ups.points[0]->value_type);
    EXPECT_EQ(7, ups.points[0]->int64_value);
    EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
    EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
    EXPECT_GE(ups.points[0]->end_timestamp_millis,
              ups.points[0]->start_timestamp_millis);
  }
}
// Asserts that `actual_proto` decodes successfully and that the decoded
// histogram's textual representation matches `expected`.
void EqHistograms(const Histogram& expected,
                  const HistogramProto& actual_proto) {
  Histogram actual;
  ASSERT_TRUE(actual.DecodeFromProto(actual_proto));
  EXPECT_EQ(expected.ToString(), actual.ToString());
}
// Samplers record values into explicit-bucket histograms. The test mirrors
// each sampler cell with a hand-maintained Histogram and checks that the
// collected proto decodes to an identical distribution.
TEST(CollectMetricsTest, Sampler) {
  auto sampler_with_labels = std::unique_ptr<Sampler<2>>(
      Sampler<2>::New({"/tensorflow/test/sampler_with_labels",
                       "Sampler with labels.", "MyLabel0", "MyLabel1"},
                      Buckets::Explicit({1.0, 2.0})));
  auto sampler_without_labels = std::unique_ptr<Sampler<0>>(Sampler<0>::New(
      {"/tensorflow/test/sampler_without_labels", "Sampler without labels."},
      Buckets::Explicit({0.0})));
  // Reference histograms kept in lockstep with the sampler cells.
  Histogram with_labels0({1.0, 2.0, DBL_MAX});
  sampler_with_labels->GetCell("Label00", "Label10")->Add(0.7);
  with_labels0.Add(0.7);
  Histogram with_labels1({1.0, 2.0, DBL_MAX});
  sampler_with_labels->GetCell("Label01", "Label11")->Add(1.5);
  with_labels1.Add(1.5);
  Histogram without_labels({0.0, DBL_MAX});
  sampler_without_labels->GetCell()->Add(0.5);
  without_labels.Add(0.5);
  // Collection must behave the same whether or not descriptors are requested.
  for (const bool collect_metric_descriptors : {true, false}) {
    SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
                                 collect_metric_descriptors));
    auto* collection_registry = CollectionRegistry::Default();
    CollectionRegistry::CollectMetricsOptions options;
    options.collect_metric_descriptors = collect_metric_descriptors;
    const std::unique_ptr<CollectedMetrics> collected_metrics =
        collection_registry->CollectMetrics(options);
    if (collect_metric_descriptors) {
      ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
      const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/sampler_with_labels");
      EXPECT_EQ("/tensorflow/test/sampler_with_labels", ld.name);
      EXPECT_EQ("Sampler with labels.", ld.description);
      ASSERT_EQ(2, ld.label_names.size());
      EXPECT_EQ("MyLabel0", ld.label_names[0]);
      EXPECT_EQ("MyLabel1", ld.label_names[1]);
      EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
      EXPECT_EQ(ValueType::kHistogram, ld.value_type);
      const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/sampler_without_labels");
      EXPECT_EQ("/tensorflow/test/sampler_without_labels", ud.name);
      EXPECT_EQ("Sampler without labels.", ud.description);
      ASSERT_EQ(0, ud.label_names.size());
      EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
      EXPECT_EQ(ValueType::kHistogram, ud.value_type);
    } else {
      EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
    }
    ASSERT_GE(collected_metrics->point_set_map.size(), 2);
    const PointSet& lps = *collected_metrics->point_set_map.at(
        "/tensorflow/test/sampler_with_labels");
    EXPECT_EQ("/tensorflow/test/sampler_with_labels", lps.metric_name);
    ASSERT_EQ(2, lps.points.size());
    ASSERT_EQ(2, lps.points[0]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
    EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
    EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
    EXPECT_EQ(ValueType::kHistogram, lps.points[0]->value_type);
    EqHistograms(with_labels0, lps.points[0]->histogram_value);
    EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
    EXPECT_GE(lps.points[0]->end_timestamp_millis,
              lps.points[0]->start_timestamp_millis);
    ASSERT_EQ(2, lps.points[1]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
    EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
    EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
    EXPECT_EQ(ValueType::kHistogram, lps.points[1]->value_type);
    EqHistograms(with_labels1, lps.points[1]->histogram_value);
    EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
    EXPECT_GE(lps.points[1]->end_timestamp_millis,
              lps.points[1]->start_timestamp_millis);
    const PointSet& ups = *collected_metrics->point_set_map.at(
        "/tensorflow/test/sampler_without_labels");
    EXPECT_EQ("/tensorflow/test/sampler_without_labels", ups.metric_name);
    ASSERT_EQ(1, ups.points.size());
    EXPECT_EQ(0, ups.points[0]->labels.size());
    EXPECT_EQ(ValueType::kHistogram, ups.points[0]->value_type);
    EqHistograms(without_labels, ups.points[0]->histogram_value);
    EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
    EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
    EXPECT_GE(ups.points[0]->end_timestamp_millis,
              ups.points[0]->start_timestamp_millis);
  }
}
// Percentile samplers expose kPercentiles point values; unlike Sampler there
// is no decoded-value comparison here, only metadata and timestamp checks.
TEST(CollectMetricsTest, PercentileSampler) {
  auto sampler_with_labels =
      std::unique_ptr<PercentileSampler<2>>(PercentileSampler<2>::New(
          {"/tensorflow/test/pctsampler_with_labels",
           "Percentile sampler with labels.", "MyLabel0", "MyLabel1"},
          {25.0, 50.0, 75.0}, 1024, UnitOfMeasure::kNumber));
  auto sampler_without_labels =
      std::unique_ptr<PercentileSampler<0>>(PercentileSampler<0>::New(
          {"/tensorflow/test/pctsampler_without_labels",
           "Percentile sampler without labels."},
          {25.0, 50.0, 75.0}, 1024, UnitOfMeasure::kNumber));
  sampler_with_labels->GetCell("Label00", "Label10")->Add(0.7);
  sampler_with_labels->GetCell("Label01", "Label11")->Add(1.5);
  sampler_without_labels->GetCell()->Add(0.5);
  // Collection must behave the same whether or not descriptors are requested.
  for (const bool collect_metric_descriptors : {true, false}) {
    SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
                                 collect_metric_descriptors));
    auto* collection_registry = CollectionRegistry::Default();
    CollectionRegistry::CollectMetricsOptions options;
    options.collect_metric_descriptors = collect_metric_descriptors;
    const std::unique_ptr<CollectedMetrics> collected_metrics =
        collection_registry->CollectMetrics(options);
    if (collect_metric_descriptors) {
      ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
      const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/pctsampler_with_labels");
      EXPECT_EQ("/tensorflow/test/pctsampler_with_labels", ld.name);
      EXPECT_EQ("Percentile sampler with labels.", ld.description);
      ASSERT_EQ(2, ld.label_names.size());
      EXPECT_EQ("MyLabel0", ld.label_names[0]);
      EXPECT_EQ("MyLabel1", ld.label_names[1]);
      EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
      EXPECT_EQ(ValueType::kPercentiles, ld.value_type);
      const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
          "/tensorflow/test/pctsampler_without_labels");
      EXPECT_EQ("/tensorflow/test/pctsampler_without_labels", ud.name);
      EXPECT_EQ("Percentile sampler without labels.", ud.description);
      ASSERT_EQ(0, ud.label_names.size());
      EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
      EXPECT_EQ(ValueType::kPercentiles, ud.value_type);
    } else {
      EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
    }
    ASSERT_GE(collected_metrics->point_set_map.size(), 2);
    const PointSet& lps = *collected_metrics->point_set_map.at(
        "/tensorflow/test/pctsampler_with_labels");
    EXPECT_EQ("/tensorflow/test/pctsampler_with_labels", lps.metric_name);
    ASSERT_EQ(2, lps.points.size());
    ASSERT_EQ(2, lps.points[0]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
    EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
    EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
    EXPECT_EQ(ValueType::kPercentiles, lps.points[0]->value_type);
    EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
    EXPECT_GE(lps.points[0]->end_timestamp_millis,
              lps.points[0]->start_timestamp_millis);
    ASSERT_EQ(2, lps.points[1]->labels.size());
    EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
    EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
    EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
    EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
    EXPECT_EQ(ValueType::kPercentiles, lps.points[1]->value_type);
    EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
    EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
    EXPECT_GE(lps.points[1]->end_timestamp_millis,
              lps.points[1]->start_timestamp_millis);
    const PointSet& ups = *collected_metrics->point_set_map.at(
        "/tensorflow/test/pctsampler_without_labels");
    EXPECT_EQ("/tensorflow/test/pctsampler_without_labels", ups.metric_name);
    ASSERT_EQ(1, ups.points.size());
    EXPECT_EQ(0, ups.points[0]->labels.size());
    EXPECT_EQ(ValueType::kPercentiles, ups.points[0]->value_type);
    EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
    EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
    EXPECT_GE(ups.points[0]->end_timestamp_millis,
              ups.points[0]->start_timestamp_millis);
  }
}
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
private:
uint64 current_millis_;
};
// Cumulative metrics keep their registration time as the start timestamp,
// while gauge metrics use the collection time for both timestamps.
TEST(CollectionRegistryTest, WriteTimestamps) {
  FakeClockEnv fake_clock_env;
  auto collection_registry =
      test_util::CollectionRegistryTestAccess::CreateRegistry(&fake_clock_env);
  fake_clock_env.AdvanceByMillis(25);
  {
    const MetricDef<MetricKind::kCumulative, int64_t, 0> cumulative_metric(
        "/tensorflow/cumulative/metric", "An example metric with no labels.");
    auto handle = collection_registry->Register(
        &cumulative_metric, [&](MetricCollectorGetter getter) {
          auto metric_collector = getter.Get(&cumulative_metric);
          metric_collector.CollectValue({}, 42);
        });
    fake_clock_env.AdvanceByMillis(75);
    const std::unique_ptr<CollectedMetrics> collected_metrics =
        collection_registry->CollectMetrics({});
    const PointSet& point_set =
        *collected_metrics->point_set_map.at("/tensorflow/cumulative/metric");
    ASSERT_EQ(1, point_set.points.size());
    // Registered at t = 25 ms, collected at t = 100 ms.
    EXPECT_EQ(25, point_set.points[0]->start_timestamp_millis);
    EXPECT_EQ(100, point_set.points[0]->end_timestamp_millis);
  }
  {
    const MetricDef<MetricKind::kGauge, int64_t, 0> gauge_metric(
        "/tensorflow/gauge/metric", "An example metric with no labels.");
    auto handle = collection_registry->Register(
        &gauge_metric, [&](MetricCollectorGetter getter) {
          auto metric_collector = getter.Get(&gauge_metric);
          metric_collector.CollectValue({}, 42);
        });
    fake_clock_env.AdvanceByMillis(75);
    const std::unique_ptr<CollectedMetrics> collected_metrics =
        collection_registry->CollectMetrics({});
    const PointSet& point_set =
        *collected_metrics->point_set_map.at("/tensorflow/gauge/metric");
    ASSERT_EQ(1, point_set.points.size());
    // Gauge: both timestamps equal the collection time (t = 175 ms).
    EXPECT_EQ(175, point_set.points[0]->start_timestamp_millis);
    EXPECT_EQ(175, point_set.points[0]->end_timestamp_millis);
  }
}
}
}
} | std::unique_ptr<CollectedMetrics> Collector::ConsumeCollectedMetrics() {
mutex_lock l(mu_);
return std::move(collected_metrics_);
} | TEST(CollectionRegistryTest, WriteTimestamps) {
FakeClockEnv fake_clock_env;
auto collection_registry =
test_util::CollectionRegistryTestAccess::CreateRegistry(&fake_clock_env);
fake_clock_env.AdvanceByMillis(25);
{
const MetricDef<MetricKind::kCumulative, int64_t, 0> cumulative_metric(
"/tensorflow/cumulative/metric", "An example metric with no labels.");
auto handle = collection_registry->Register(
&cumulative_metric, [&](MetricCollectorGetter getter) {
auto metric_collector = getter.Get(&cumulative_metric);
metric_collector.CollectValue({}, 42);
});
fake_clock_env.AdvanceByMillis(75);
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics({});
const PointSet& point_set =
*collected_metrics->point_set_map.at("/tensorflow/cumulative/metric");
ASSERT_EQ(1, point_set.points.size());
EXPECT_EQ(25, point_set.points[0]->start_timestamp_millis);
EXPECT_EQ(100, point_set.points[0]->end_timestamp_millis);
}
{
const MetricDef<MetricKind::kGauge, int64_t, 0> gauge_metric(
"/tensorflow/gauge/metric", "An example metric with no labels.");
auto handle = collection_registry->Register(
&gauge_metric, [&](MetricCollectorGetter getter) {
auto metric_collector = getter.Get(&gauge_metric);
metric_collector.CollectValue({}, 42);
});
fake_clock_env.AdvanceByMillis(75);
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics({});
const PointSet& point_set =
*collected_metrics->point_set_map.at("/tensorflow/gauge/metric");
ASSERT_EQ(1, point_set.points.size());
EXPECT_EQ(175, point_set.points[0]->start_timestamp_millis);
EXPECT_EQ(175, point_set.points[0]->end_timestamp_millis);
}
} |
#include "arolla/expr/optimization/peephole_optimizations/tuple.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
// Rewrites get_nth(make_tuple(x0, ..., xk), i) into xi. Any expression that
// does not match this exact shape is returned unchanged.
absl::StatusOr<ExprNodePtr> OptimizeTupleGet(ExprNodePtr expr) {
  static Fingerprint make_tuple_fingerprint = MakeTupleOperator().fingerprint();
  // Must be an operator node with exactly one dependency (the tuple).
  if (!expr->is_op() || expr->node_deps().size() != 1) {
    return expr;
  }
  const auto* get_nth =
      fast_dynamic_downcast_final<const GetNthOperator*>(expr->op().get());
  if (get_nth == nullptr) {
    return expr;
  }
  const ExprNodePtr& tuple_expr = expr->node_deps()[0];
  if (!tuple_expr->is_op()) {
    return expr;
  }
  ASSIGN_OR_RETURN(auto tuple_op, DecayRegisteredOperator(tuple_expr->op()));
  // The argument must literally be a make_tuple with enough elements.
  if (tuple_op->fingerprint() != make_tuple_fingerprint) {
    return expr;
  }
  if (tuple_expr->node_deps().size() <= get_nth->index()) {
    return expr;
  }
  return tuple_expr->node_deps()[get_nth->index()];
}
// Appends the get_n(make_tuple(...)) simplification to `optimizations`.
// ASSIGN_OR_RETURN writes directly into the freshly emplaced slot.
absl::Status AppendGetNOptimizations(PeepholeOptimizationPack& optimizations) {
  ASSIGN_OR_RETURN(
      optimizations.emplace_back(),
      PeepholeOptimization::CreateTransformOptimization(OptimizeTupleGet));
  return absl::OkStatus();
}
}
// Builds the pack of tuple-related peephole optimizations. Currently this is
// only the get_n(make_tuple(...)) -> element simplification.
absl::StatusOr<PeepholeOptimizationPack> TupleOptimizations() {
  PeepholeOptimizationPack pack;
  RETURN_IF_ERROR(AppendGetNOptimizations(pack));
  return pack;
}
} | #include "arolla/expr/optimization/peephole_optimizations/tuple.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::WithQTypeAnnotation;
// Fixture that builds a peephole optimizer containing only the tuple
// optimizations under test.
class TupleOptimizationsTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ASSERT_OK(InitArolla());
    ASSERT_OK_AND_ASSIGN(optimizer_,
                         CreatePeepholeOptimizer({TupleOptimizations}));
  }
  std::unique_ptr<PeepholeOptimizer> optimizer_;
};
// get_nth(make_tuple(...), i) must collapse to the i-th tuple element;
// get_nth applied to a non-tuple expression must be left untouched.
TEST_F(TupleOptimizationsTest, SingleSubstitution) {
  auto a = Leaf("l1");
  auto b = Leaf("l2");
  auto c = Leaf("l3");
  auto d = Leaf("l4");
  ASSERT_OK_AND_ASSIGN(auto tuple, CallOp("core.make_tuple", {a, b, c, d}));
  {
    ASSERT_OK_AND_ASSIGN(auto get0, CallOp(GetNthOperator::Make(0), {tuple}));
    EXPECT_THAT(optimizer_->Apply(get0), IsOkAndHolds(EqualsExpr(a)));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto get1, CallOp(GetNthOperator::Make(1), {tuple}));
    EXPECT_THAT(optimizer_->Apply(get1), IsOkAndHolds(EqualsExpr(b)));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto get2, CallOp(GetNthOperator::Make(2), {tuple}));
    EXPECT_THAT(optimizer_->Apply(get2), IsOkAndHolds(EqualsExpr(c)));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto get3, CallOp(GetNthOperator::Make(3), {tuple}));
    EXPECT_THAT(optimizer_->Apply(get3), IsOkAndHolds(EqualsExpr(d)));
  }
  {
    // Argument is a leaf, not a tuple: the expression must stay unchanged.
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp(GetNthOperator::Make(0), {a}));
    EXPECT_THAT(optimizer_->Apply(expr), IsOkAndHolds(EqualsExpr(expr)));
  }
}
// After lowering, core.concat_tuples becomes a single core.make_tuple, which
// the get_n optimization can then see through.
TEST_F(TupleOptimizationsTest, WorksWithConcatTuples) {
  ASSERT_OK_AND_ASSIGN(auto a,
                       WithQTypeAnnotation(Leaf("a"), GetQType<int32_t>()));
  ASSERT_OK_AND_ASSIGN(auto b,
                       WithQTypeAnnotation(Leaf("b"), GetQType<int64_t>()));
  ASSERT_OK_AND_ASSIGN(
      auto concat_tuples,
      CallOp("core.concat_tuples",
             {CallOp("core.make_tuple", {a, b}), CallOp("core.make_tuple", {b}),
              CallOp("core.make_tuple", {a})}));
  ASSERT_OK_AND_ASSIGN(auto lowest_concat_tuples, ToLowest(concat_tuples));
  EXPECT_THAT(
      optimizer_->Apply(lowest_concat_tuples),
      IsOkAndHolds(EqualsExpr(CallOp("core.make_tuple", {a, b, b, a}))));
  // Only verifies that constructing get_nth on the *unlowered* expression
  // succeeds; the resulting expression is intentionally unused.
  ASSERT_OK_AND_ASSIGN(auto get_2,
                       CallOp(GetNthOperator::Make(2), {concat_tuples}));
}
}
} | #include "arolla/expr/optimization/peephole_optimizations/tuple.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
absl::StatusOr<ExprNodePtr> OptimizeTupleGet(ExprNodePtr expr) {
static Fingerprint make_tuple_fingerprint = MakeTupleOperator().fingerprint();
if (!expr->is_op()) {
return expr;
}
auto get_nth_operator =
fast_dynamic_downcast_final<const GetNthOperator*>(expr->op().get());
if (get_nth_operator == nullptr) {
return expr;
}
if (expr->node_deps().size() != 1) {
return expr;
}
auto tuple_expr = expr->node_deps()[0];
if (!tuple_expr->is_op()) {
return expr;
}
ASSIGN_OR_RETURN(auto tuple_op, DecayRegisteredOperator(tuple_expr->op()));
if (tuple_op->fingerprint() != make_tuple_fingerprint ||
tuple_expr->node_deps().size() <= get_nth_operator->index()) {
return expr;
}
return tuple_expr->node_deps()[get_nth_operator->index()];
} | TEST_F(TupleOptimizationsTest, SingleSubstitution) {
auto a = Leaf("l1");
auto b = Leaf("l2");
auto c = Leaf("l3");
auto d = Leaf("l4");
ASSERT_OK_AND_ASSIGN(auto tuple, CallOp("core.make_tuple", {a, b, c, d}));
{
ASSERT_OK_AND_ASSIGN(auto get0, CallOp(GetNthOperator::Make(0), {tuple}));
EXPECT_THAT(optimizer_->Apply(get0), IsOkAndHolds(EqualsExpr(a)));
}
{
ASSERT_OK_AND_ASSIGN(auto get1, CallOp(GetNthOperator::Make(1), {tuple}));
EXPECT_THAT(optimizer_->Apply(get1), IsOkAndHolds(EqualsExpr(b)));
}
{
ASSERT_OK_AND_ASSIGN(auto get2, CallOp(GetNthOperator::Make(2), {tuple}));
EXPECT_THAT(optimizer_->Apply(get2), IsOkAndHolds(EqualsExpr(c)));
}
{
ASSERT_OK_AND_ASSIGN(auto get3, CallOp(GetNthOperator::Make(3), {tuple}));
EXPECT_THAT(optimizer_->Apply(get3), IsOkAndHolds(EqualsExpr(d)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(GetNthOperator::Make(0), {a}));
EXPECT_THAT(optimizer_->Apply(expr), IsOkAndHolds(EqualsExpr(expr)));
}
} |
#include "quiche/quic/test_tools/simulator/simulator.h"
#include <utility>
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace simulator {
// Delegates to the main constructor with no externally-supplied RNG; the
// generator is then lazily fetched in GetRandomGenerator().
Simulator::Simulator() : Simulator(nullptr) {}
// Builds a simulator with an optional random generator. The internal alarm
// is used by RunFor() to stop the event loop at a fixed deadline.
Simulator::Simulator(QuicRandom* random_generator)
    : random_generator_(random_generator),
      alarm_factory_(this, "Default Alarm Manager"),
      run_for_should_stop_(false),
      enable_random_delays_(false) {
  run_for_alarm_.reset(
      alarm_factory_.CreateAlarm(new RunForDelegate(&run_for_should_stop_)));
}
Simulator::~Simulator() {
  // NOTE(review): the explicit reset presumably guarantees the alarm is
  // destroyed before the members it references — confirm the member
  // declaration order in the header.
  run_for_alarm_.reset();
}
// The simulated clock starts at kStartTime and is advanced only by
// HandleNextScheduledActor().
Simulator::Clock::Clock() : now_(kStartTime) {}
QuicTime Simulator::Clock::ApproximateNow() const { return now_; }
QuicTime Simulator::Clock::Now() const { return now_; }
QuicWallTime Simulator::Clock::WallNow() const {
  // Wall time mirrors the simulated monotonic time, expressed as
  // microseconds since QuicTime::Zero().
  return QuicWallTime::FromUNIXMicroseconds(
      (now_ - QuicTime::Zero()).ToMicroseconds());
}
// Registers `actor` with the simulator. Each actor starts unscheduled
// (sentinel QuicTime::Infinite()) and its name must be unique.
void Simulator::AddActor(Actor* actor) {
  const bool time_inserted =
      scheduled_times_.emplace(actor, QuicTime::Infinite()).second;
  const bool name_inserted = actor_names_.insert(actor->name()).second;
  QUICHE_DCHECK(time_inserted);
  QUICHE_DCHECK(name_inserted);
}
// Unregisters `actor`. A still-pending event is unscheduled first so that
// schedule_ never retains a dangling actor pointer.
void Simulator::RemoveActor(Actor* actor) {
  auto scheduled_time_it = scheduled_times_.find(actor);
  auto actor_names_it = actor_names_.find(actor->name());
  QUICHE_DCHECK(scheduled_time_it != scheduled_times_.end());
  QUICHE_DCHECK(actor_names_it != actor_names_.end());
  QuicTime scheduled_time = scheduled_time_it->second;
  if (scheduled_time != QuicTime::Infinite()) {
    Unschedule(actor);
  }
  scheduled_times_.erase(scheduled_time_it);
  actor_names_.erase(actor_names_it);
}
// Schedules `actor` to act at `new_time`, unless it already has an event at
// that time or earlier. A pending later event is moved up instead.
void Simulator::Schedule(Actor* actor, QuicTime new_time) {
  auto it = scheduled_times_.find(actor);
  QUICHE_DCHECK(it != scheduled_times_.end());
  const QuicTime current_time = it->second;
  if (current_time <= new_time) {
    return;
  }
  if (current_time != QuicTime::Infinite()) {
    // Drop the existing (later) event before inserting the new one.
    Unschedule(actor);
  }
  it->second = new_time;
  schedule_.emplace(new_time, actor);
}
// Removes the pending event for `actor`. The actor must currently be
// scheduled; its entry in scheduled_times_ is reset to the "not scheduled"
// sentinel QuicTime::Infinite().
void Simulator::Unschedule(Actor* actor) {
  auto scheduled_time_it = scheduled_times_.find(actor);
  QUICHE_DCHECK(scheduled_time_it != scheduled_times_.end());
  QuicTime scheduled_time = scheduled_time_it->second;
  QUICHE_DCHECK(scheduled_time != QuicTime::Infinite());
  // Several actors may share a timestamp in the multimap, so scan the equal
  // range for the entry belonging to this actor.
  auto range = schedule_.equal_range(scheduled_time);
  for (auto it = range.first; it != range.second; ++it) {
    if (it->second == actor) {
      schedule_.erase(it);
      scheduled_time_it->second = QuicTime::Infinite();
      return;
    }
  }
  // Reaching here means scheduled_times_ and schedule_ disagree.
  QUICHE_DCHECK(false);
}
const QuicClock* Simulator::GetClock() const { return &clock_; }
// Lazily falls back to the process-wide QuicRandom instance when no
// generator was supplied at construction.
QuicRandom* Simulator::GetRandomGenerator() {
  if (random_generator_ == nullptr) {
    random_generator_ = QuicRandom::GetInstance();
  }
  return random_generator_;
}
quiche::QuicheBufferAllocator* Simulator::GetStreamSendBufferAllocator() {
  return &buffer_allocator_;
}
QuicAlarmFactory* Simulator::GetAlarmFactory() { return &alarm_factory_; }
// Alarm delegate used by RunFor(): firing simply raises the stop flag that
// RunUntil()'s predicate polls.
Simulator::RunForDelegate::RunForDelegate(bool* run_for_should_stop)
    : run_for_should_stop_(run_for_should_stop) {}
void Simulator::RunForDelegate::OnAlarm() { *run_for_should_stop_ = true; }
// Runs the event loop for exactly `time_span` of simulated time by arming a
// one-shot alarm that flips run_for_should_stop_ at the deadline.
void Simulator::RunFor(QuicTime::Delta time_span) {
  QUICHE_DCHECK(!run_for_alarm_->IsSet());
  const QuicTime end_time = clock_.Now() + time_span;
  run_for_alarm_->Set(end_time);
  run_for_should_stop_ = false;
  bool simulation_result = RunUntil([this]() { return run_for_should_stop_; });
  // The alarm guarantees the predicate eventually becomes true, exactly at
  // end_time; anything else is a bug.
  QUICHE_DCHECK(simulation_result);
  QUICHE_DCHECK(clock_.Now() == end_time);
}
// Pops the earliest event from the schedule, advances the simulated clock to
// its timestamp, and lets the corresponding actor act.
void Simulator::HandleNextScheduledActor() {
  const auto current_event_it = schedule_.begin();
  QuicTime event_time = current_event_it->first;
  Actor* actor = current_event_it->second;
  QUIC_DVLOG(3) << "At t = " << event_time.ToDebuggingValue() << ", calling "
                << actor->name();
  Unschedule(actor);
  // Time must never move backwards; a violation indicates a buggy actor.
  if (clock_.Now() > event_time) {
    QUIC_BUG(quic_bug_10150_1)
        << "Error: event registered by [" << actor->name()
        << "] requires travelling back in time. Current time: "
        << clock_.Now().ToDebuggingValue()
        << ", scheduled time: " << event_time.ToDebuggingValue();
  }
  clock_.now_ = event_time;
  actor->Act();
}
}
} | #include "quiche/quic/test_tools/simulator/simulator.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/node_hash_map.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/alarm_factory.h"
#include "quiche/quic/test_tools/simulator/link.h"
#include "quiche/quic/test_tools/simulator/packet_filter.h"
#include "quiche/quic/test_tools/simulator/queue.h"
#include "quiche/quic/test_tools/simulator/switch.h"
#include "quiche/quic/test_tools/simulator/traffic_policer.h"
using testing::_;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace simulator {
// Test actor that increments its value once per `period`, starting at t = 0
// (the first Act() brings the value from -1 to 0).
class Counter : public Actor {
 public:
  Counter(Simulator* simulator, std::string name, QuicTime::Delta period)
      : Actor(simulator, name), value_(-1), period_(period) {
    Schedule(clock_->Now());
  }
  ~Counter() override {}
  inline int get_value() const { return value_; }
  // Bumps the counter and re-schedules itself one period later.
  void Act() override {
    ++value_;
    QUIC_DVLOG(1) << name_ << " has value " << value_ << " at time "
                  << clock_->Now().ToDebuggingValue();
    Schedule(clock_->Now() + period_);
  }
 private:
  int value_;
  QuicTime::Delta period_;
};
// Fixture shared by the simulator tests below.
class SimulatorTest : public quic::test::QuicTest {};
// Two counters with different periods: running until the slow one reaches 10
// must leave the fast one at 10 * 10 / 3 (integer division).
TEST_F(SimulatorTest, Counters) {
  Simulator simulator;
  for (int i = 0; i < 2; ++i) {
    Counter fast_counter(&simulator, "fast_counter",
                         QuicTime::Delta::FromSeconds(3));
    Counter slow_counter(&simulator, "slow_counter",
                         QuicTime::Delta::FromSeconds(10));
    simulator.RunUntil(
        [&slow_counter]() { return slow_counter.get_value() >= 10; });
    EXPECT_EQ(10, slow_counter.get_value());
    EXPECT_EQ(10 * 10 / 3, fast_counter.get_value());
  }
}
// Receiving port that tallies delivered bytes and packets, including a
// per-destination packet count.
class CounterPort : public UnconstrainedPortInterface {
 public:
  CounterPort() { Reset(); }
  ~CounterPort() override {}
  inline QuicByteCount bytes() const { return bytes_; }
  inline QuicPacketCount packets() const { return packets_; }
  void AcceptPacket(std::unique_ptr<Packet> packet) override {
    bytes_ += packet->size;
    packets_ += 1;
    per_destination_packet_counter_[packet->destination] += 1;
  }
  void Reset() {
    bytes_ = 0;
    packets_ = 0;
    per_destination_packet_counter_.clear();
  }
  // Returns 0 for destinations that never received a packet.
  QuicPacketCount CountPacketsForDestination(std::string destination) const {
    auto result_it = per_destination_packet_counter_.find(destination);
    if (result_it == per_destination_packet_counter_.cend()) {
      return 0;
    }
    return result_it->second;
  }
 private:
  QuicByteCount bytes_;
  QuicPacketCount packets_;
  absl::node_hash_map<std::string, QuicPacketCount>
      per_destination_packet_counter_;
};
// Endpoint that transmits fixed-size packets toward `destination` as fast as
// its egress port allows, and counts everything it receives.
// Fix: tx_port_ is now brace-initialized to nullptr — previously it was left
// indeterminate between construction and SetTxPort(), so a premature Act()
// would dereference an uninitialized pointer.
class LinkSaturator : public Endpoint {
 public:
  LinkSaturator(Simulator* simulator, std::string name,
                QuicByteCount packet_size, std::string destination)
      : Endpoint(simulator, name),
        packet_size_(packet_size),
        destination_(std::move(destination)),
        bytes_transmitted_(0),
        packets_transmitted_(0) {
    Schedule(clock_->Now());
  }
  // Sends one packet whenever the egress port is free, then re-schedules
  // itself for the next moment the port becomes available.
  void Act() override {
    if (tx_port_->TimeUntilAvailable().IsZero()) {
      auto packet = std::make_unique<Packet>();
      packet->source = name_;
      packet->destination = destination_;
      packet->tx_timestamp = clock_->Now();
      packet->size = packet_size_;
      tx_port_->AcceptPacket(std::move(packet));
      bytes_transmitted_ += packet_size_;
      packets_transmitted_ += 1;
    }
    Schedule(clock_->Now() + tx_port_->TimeUntilAvailable());
  }
  UnconstrainedPortInterface* GetRxPort() override {
    return static_cast<UnconstrainedPortInterface*>(&rx_port_);
  }
  void SetTxPort(ConstrainedPortInterface* port) override { tx_port_ = port; }
  CounterPort* counter() { return &rx_port_; }
  inline QuicByteCount bytes_transmitted() const { return bytes_transmitted_; }
  inline QuicPacketCount packets_transmitted() const {
    return packets_transmitted_;
  }
  void Pause() { Unschedule(); }
  void Resume() { Schedule(clock_->Now()); }
 private:
  QuicByteCount packet_size_;
  std::string destination_;
  // nullptr until SetTxPort() wires up the egress port.
  ConstrainedPortInterface* tx_port_ = nullptr;
  CounterPort rx_port_;
  QuicByteCount bytes_transmitted_;
  QuicPacketCount packets_transmitted_;
};
// Saturates a symmetric link in both directions and validates byte/packet
// accounting on both endpoints as well as the observed link bandwidth.
// Fix: corrected the typo "contidition" -> "condition" in the failure
// message emitted by ADD_FAILURE().
TEST_F(SimulatorTest, DirectLinkSaturation) {
  Simulator simulator;
  LinkSaturator saturator_a(&simulator, "Saturator A", 1000, "Saturator B");
  LinkSaturator saturator_b(&simulator, "Saturator B", 100, "Saturator A");
  SymmetricLink link(&saturator_a, &saturator_b,
                     QuicBandwidth::FromKBytesPerSecond(1000),
                     QuicTime::Delta::FromMilliseconds(100) +
                         QuicTime::Delta::FromMicroseconds(1));
  const QuicTime start_time = simulator.GetClock()->Now();
  const QuicTime after_first_50_ms =
      start_time + QuicTime::Delta::FromMilliseconds(50);
  simulator.RunUntil([&simulator, after_first_50_ms]() {
    return simulator.GetClock()->Now() >= after_first_50_ms;
  });
  // At 1000 kB/s, each side should have pushed roughly 50 kB after 50 ms.
  EXPECT_LE(1000u * 50u, saturator_a.bytes_transmitted());
  EXPECT_GE(1000u * 51u, saturator_a.bytes_transmitted());
  EXPECT_LE(1000u * 50u, saturator_b.bytes_transmitted());
  EXPECT_GE(1000u * 51u, saturator_b.bytes_transmitted());
  EXPECT_LE(50u, saturator_a.packets_transmitted());
  EXPECT_GE(51u, saturator_a.packets_transmitted());
  EXPECT_LE(500u, saturator_b.packets_transmitted());
  EXPECT_GE(501u, saturator_b.packets_transmitted());
  // Nothing has arrived yet: the one-way propagation delay exceeds 50 ms.
  EXPECT_EQ(0u, saturator_a.counter()->bytes());
  EXPECT_EQ(0u, saturator_b.counter()->bytes());
  simulator.RunUntil([&saturator_a, &saturator_b]() {
    if (saturator_a.counter()->packets() > 1000 ||
        saturator_b.counter()->packets() > 100) {
      ADD_FAILURE() << "The simulation did not arrive at the expected "
                       "termination condition. Saturator A counter: "
                    << saturator_a.counter()->packets()
                    << ", saturator B counter: "
                    << saturator_b.counter()->packets();
      return true;
    }
    return saturator_a.counter()->packets() == 1000 &&
           saturator_b.counter()->packets() == 100;
  });
  EXPECT_EQ(201u, saturator_a.packets_transmitted());
  EXPECT_EQ(2001u, saturator_b.packets_transmitted());
  EXPECT_EQ(201u * 1000, saturator_a.bytes_transmitted());
  EXPECT_EQ(2001u * 100, saturator_b.bytes_transmitted());
  EXPECT_EQ(1000u,
            saturator_a.counter()->CountPacketsForDestination("Saturator A"));
  EXPECT_EQ(100u,
            saturator_b.counter()->CountPacketsForDestination("Saturator B"));
  EXPECT_EQ(0u,
            saturator_a.counter()->CountPacketsForDestination("Saturator B"));
  EXPECT_EQ(0u,
            saturator_b.counter()->CountPacketsForDestination("Saturator A"));
  const QuicTime end_time = simulator.GetClock()->Now();
  const QuicBandwidth observed_bandwidth = QuicBandwidth::FromBytesAndTimeDelta(
      saturator_a.bytes_transmitted(), end_time - start_time);
  EXPECT_APPROX_EQ(link.bandwidth(), observed_bandwidth, 0.01f);
}
class PacketAcceptor : public ConstrainedPortInterface {
public:
void AcceptPacket(std::unique_ptr<Packet> packet) override {
packets_.emplace_back(std::move(packet));
}
QuicTime::Delta TimeUntilAvailable() override {
return QuicTime::Delta::Zero();
}
std::vector<std::unique_ptr<Packet>>* packets() { return &packets_; }
private:
std::vector<std::unique_ptr<Packet>> packets_;
};
// Verifies FIFO queue behavior: packets that fit within the byte capacity are
// queued, a packet that would overflow is dropped, and all queued packets are
// eventually delivered to the output port in order.
TEST_F(SimulatorTest, Queue) {
  Simulator simulator;
  // Queue capacity is 1000 bytes.
  Queue queue(&simulator, "Queue", 1000);
  PacketAcceptor acceptor;
  queue.set_tx_port(&acceptor);
  EXPECT_EQ(0u, queue.bytes_queued());
  EXPECT_EQ(0u, queue.packets_queued());
  EXPECT_EQ(0u, acceptor.packets()->size());
  // A 600-byte packet fits (600 <= 1000).
  auto first_packet = std::make_unique<Packet>();
  first_packet->size = 600;
  queue.AcceptPacket(std::move(first_packet));
  EXPECT_EQ(600u, queue.bytes_queued());
  EXPECT_EQ(1u, queue.packets_queued());
  EXPECT_EQ(0u, acceptor.packets()->size());
  // A 500-byte packet would overflow (600 + 500 > 1000) and is dropped.
  auto second_packet = std::make_unique<Packet>();
  second_packet->size = 500;
  queue.AcceptPacket(std::move(second_packet));
  EXPECT_EQ(600u, queue.bytes_queued());
  EXPECT_EQ(1u, queue.packets_queued());
  EXPECT_EQ(0u, acceptor.packets()->size());
  // A 400-byte packet fits exactly (600 + 400 == 1000).
  auto third_packet = std::make_unique<Packet>();
  third_packet->size = 400;
  queue.AcceptPacket(std::move(third_packet));
  EXPECT_EQ(1000u, queue.bytes_queued());
  EXPECT_EQ(2u, queue.packets_queued());
  EXPECT_EQ(0u, acceptor.packets()->size());
  // Drain the event queue completely; the two surviving packets must reach
  // the acceptor in FIFO order.
  simulator.RunUntil([]() { return false; });
  EXPECT_EQ(0u, queue.bytes_queued());
  EXPECT_EQ(0u, queue.packets_queued());
  ASSERT_EQ(2u, acceptor.packets()->size());
  EXPECT_EQ(600u, acceptor.packets()->at(0)->size);
  EXPECT_EQ(400u, acceptor.packets()->at(1)->size);
}
// Feeds a fast local link into a bottleneck with 10x less bandwidth through a
// queue sized to one bandwidth-delay product, and checks that roughly 90% of
// the transmitted packets are lost at the queue.
TEST_F(SimulatorTest, QueueBottleneck) {
  const QuicBandwidth local_bandwidth =
      QuicBandwidth::FromKBytesPerSecond(1000);
  // The bottleneck carries one tenth of the local link's rate.
  const QuicBandwidth bottleneck_bandwidth = 0.1f * local_bandwidth;
  const QuicTime::Delta local_propagation_delay =
      QuicTime::Delta::FromMilliseconds(1);
  const QuicTime::Delta bottleneck_propagation_delay =
      QuicTime::Delta::FromMilliseconds(20);
  // Bandwidth-delay product of the bottleneck over the full path delay.
  const QuicByteCount bdp =
      bottleneck_bandwidth *
      (local_propagation_delay + bottleneck_propagation_delay);
  Simulator simulator;
  LinkSaturator saturator(&simulator, "Saturator", 1000, "Counter");
  ASSERT_GE(bdp, 1000u);
  // Queue sized to exactly one BDP.
  Queue queue(&simulator, "Queue", bdp);
  CounterPort counter;
  OneWayLink local_link(&simulator, "Local link", &queue, local_bandwidth,
                        local_propagation_delay);
  OneWayLink bottleneck_link(&simulator, "Bottleneck link", &counter,
                             bottleneck_bandwidth,
                             bottleneck_propagation_delay);
  saturator.SetTxPort(&local_link);
  queue.set_tx_port(&bottleneck_link);
  static const QuicPacketCount packets_received = 1000;
  simulator.RunUntil(
      [&counter]() { return counter.packets() == packets_received; });
  // Since the sender saturates a link 10x faster than the bottleneck, about
  // 90% of the packets must be dropped.
  const double loss_ratio = 1 - static_cast<double>(packets_received) /
                                    saturator.packets_transmitted();
  EXPECT_NEAR(loss_ratio, 0.9, 0.001);
}
// Same topology as QueueBottleneck, but the queue holds only a single
// 1000-byte packet.  Verifies that packets still trickle through the
// bottleneck rather than the queue deadlocking.
TEST_F(SimulatorTest, OnePacketQueue) {
  const QuicBandwidth local_bandwidth =
      QuicBandwidth::FromKBytesPerSecond(1000);
  const QuicBandwidth bottleneck_bandwidth = 0.1f * local_bandwidth;
  const QuicTime::Delta local_propagation_delay =
      QuicTime::Delta::FromMilliseconds(1);
  const QuicTime::Delta bottleneck_propagation_delay =
      QuicTime::Delta::FromMilliseconds(20);
  Simulator simulator;
  LinkSaturator saturator(&simulator, "Saturator", 1000, "Counter");
  // Capacity of 1000 bytes == exactly one packet from the saturator.
  Queue queue(&simulator, "Queue", 1000);
  CounterPort counter;
  OneWayLink local_link(&simulator, "Local link", &queue, local_bandwidth,
                        local_propagation_delay);
  OneWayLink bottleneck_link(&simulator, "Bottleneck link", &counter,
                             bottleneck_bandwidth,
                             bottleneck_propagation_delay);
  saturator.SetTxPort(&local_link);
  queue.set_tx_port(&bottleneck_link);
  static const QuicPacketCount packets_received = 10;
  // Bound the simulation so a regression cannot hang the test.
  const QuicTime deadline =
      simulator.GetClock()->Now() + QuicTime::Delta::FromSeconds(10);
  simulator.RunUntil([&simulator, &counter, deadline]() {
    return counter.packets() == packets_received ||
           simulator.GetClock()->Now() > deadline;
  });
  ASSERT_EQ(packets_received, counter.packets());
}
// Connects three saturators in a ring of destinations through a switch and
// verifies that throughput approaches link bandwidth and that traffic is
// (eventually) delivered only to the intended destination.
TEST_F(SimulatorTest, SwitchedNetwork) {
  const QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(10000);
  const QuicTime::Delta base_propagation_delay =
      QuicTime::Delta::FromMilliseconds(50);
  Simulator simulator;
  // Each saturator sends to the next one: 1 -> 2 -> 3 -> 1.
  LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
  LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 3");
  LinkSaturator saturator3(&simulator, "Saturator 3", 1000, "Saturator 1");
  Switch network_switch(&simulator, "Switch", 8,
                        bandwidth * base_propagation_delay * 10);
  // Links have identical bandwidth but different propagation delays.
  SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
                      base_propagation_delay);
  SymmetricLink link2(&saturator2, network_switch.port(2), bandwidth,
                      base_propagation_delay * 2);
  SymmetricLink link3(&saturator3, network_switch.port(3), bandwidth,
                      base_propagation_delay * 3);
  const QuicTime start_time = simulator.GetClock()->Now();
  static const QuicPacketCount bytes_received = 64 * 1000;
  simulator.RunUntil([&saturator1]() {
    return saturator1.counter()->bytes() >= bytes_received;
  });
  const QuicTime end_time = simulator.GetClock()->Now();
  const QuicBandwidth observed_bandwidth = QuicBandwidth::FromBytesAndTimeDelta(
      bytes_received, end_time - start_time);
  // Throughput should be within 10% of the nominal link bandwidth.
  const double bandwidth_ratio =
      static_cast<double>(observed_bandwidth.ToBitsPerSecond()) /
      bandwidth.ToBitsPerSecond();
  EXPECT_NEAR(1, bandwidth_ratio, 0.1);
  // All three endpoints should receive comparable amounts of traffic.
  const double normalized_received_packets_for_saturator_2 =
      static_cast<double>(saturator2.counter()->packets()) /
      saturator1.counter()->packets();
  const double normalized_received_packets_for_saturator_3 =
      static_cast<double>(saturator3.counter()->packets()) /
      saturator1.counter()->packets();
  EXPECT_NEAR(1, normalized_received_packets_for_saturator_2, 0.1);
  EXPECT_NEAR(1, normalized_received_packets_for_saturator_3, 0.1);
  // NOTE(review): the single misdirected packet per destination below
  // presumably comes from the switch flooding frames before it has learned
  // the destination port — confirm against the Switch implementation.
  EXPECT_EQ(0u,
            saturator2.counter()->CountPacketsForDestination("Saturator 1"));
  EXPECT_EQ(0u,
            saturator3.counter()->CountPacketsForDestination("Saturator 1"));
  EXPECT_EQ(1u,
            saturator1.counter()->CountPacketsForDestination("Saturator 2"));
  EXPECT_EQ(1u,
            saturator3.counter()->CountPacketsForDestination("Saturator 2"));
  EXPECT_EQ(1u,
            saturator1.counter()->CountPacketsForDestination("Saturator 3"));
  EXPECT_EQ(1u,
            saturator2.counter()->CountPacketsForDestination("Saturator 3"));
}
// Actor that repeatedly cancels and re-arms |alarm| every |interval| until the
// alarm's original deadline passes, counting each operation.  Used to verify
// that toggling an alarm does not change when it ultimately fires.
class AlarmToggler : public Actor {
 public:
  AlarmToggler(Simulator* simulator, std::string name, QuicAlarm* alarm,
               QuicTime::Delta interval)
      : Actor(simulator, name),
        alarm_(alarm),
        interval_(interval),
        deadline_(alarm->deadline()),
        times_set_(0),
        times_cancelled_(0) {
    // The alarm must already be armed with a deadline in the future.
    EXPECT_TRUE(alarm->IsSet());
    EXPECT_GE(alarm->deadline(), clock_->Now());
    Schedule(clock_->Now());
  }
  void Act() override {
    // Stop toggling once the original deadline has been reached; the alarm is
    // left in whatever state the last toggle put it in.
    if (deadline_ <= clock_->Now()) {
      return;
    }
    // Flip the alarm: cancel it if armed, re-arm it at the original deadline
    // otherwise.
    if (alarm_->IsSet()) {
      alarm_->Cancel();
      times_cancelled_++;
    } else {
      alarm_->Set(deadline_);
      times_set_++;
    }
    Schedule(clock_->Now() + interval_);
  }
  inline int times_set() { return times_set_; }
  inline int times_cancelled() { return times_cancelled_; }
 private:
  QuicAlarm* alarm_;          // Not owned.
  QuicTime::Delta interval_;  // Time between successive toggles.
  QuicTime deadline_;         // The alarm's original deadline.
  // Number of times this actor has re-armed / cancelled the alarm.
  int times_set_;
  int times_cancelled_;
};
// Alarm delegate that bumps an externally-owned counter every time the alarm
// fires.
class CounterDelegate : public QuicAlarm::DelegateWithoutContext {
 public:
  explicit CounterDelegate(size_t* counter) : counter_(counter) {}

  // Called by the alarm machinery when the deadline is reached.
  void OnAlarm() override { ++(*counter_); }

 private:
  size_t* counter_;  // Not owned; must outlive this delegate.
};
// Runs two alarms for one simulated second while an AlarmToggler repeatedly
// cancels/re-arms the slow one.  Both alarms must fire exactly once, and the
// toggler's set/cancel counts must match the expected cadence.
TEST_F(SimulatorTest, Alarms) {
  Simulator simulator;
  QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();
  size_t fast_alarm_counter = 0;
  size_t slow_alarm_counter = 0;
  // CounterDelegate pointers are owned by the alarms.
  std::unique_ptr<QuicAlarm> alarm_fast(
      alarm_factory->CreateAlarm(new CounterDelegate(&fast_alarm_counter)));
  std::unique_ptr<QuicAlarm> alarm_slow(
      alarm_factory->CreateAlarm(new CounterDelegate(&slow_alarm_counter)));
  const QuicTime start_time = simulator.GetClock()->Now();
  alarm_fast->Set(start_time + QuicTime::Delta::FromMilliseconds(100));
  alarm_slow->Set(start_time + QuicTime::Delta::FromMilliseconds(750));
  // Toggle the slow alarm every 100 ms; the toggling must not change the
  // time at which it ultimately fires.
  AlarmToggler toggler(&simulator, "Toggler", alarm_slow.get(),
                       QuicTime::Delta::FromMilliseconds(100));
  const QuicTime end_time =
      start_time + QuicTime::Delta::FromMilliseconds(1000);
  EXPECT_FALSE(simulator.RunUntil([&simulator, end_time]() {
    return simulator.GetClock()->Now() >= end_time;
  }));
  EXPECT_EQ(1u, slow_alarm_counter);
  EXPECT_EQ(1u, fast_alarm_counter);
  EXPECT_EQ(4, toggler.times_set());
  EXPECT_EQ(4, toggler.times_cancelled());
}
// A cancelled alarm must never fire, even after the simulation runs past its
// original deadline.
TEST_F(SimulatorTest, AlarmCancelling) {
  Simulator simulator;
  QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();

  // |fire_count| would be incremented by the delegate if the alarm ever fired.
  size_t fire_count = 0;
  std::unique_ptr<QuicAlarm> alarm(
      alarm_factory->CreateAlarm(new CounterDelegate(&fire_count)));

  const QuicTime begin = simulator.GetClock()->Now();
  const QuicTime armed_deadline = begin + QuicTime::Delta::FromMilliseconds(300);
  const QuicTime stop_at = begin + QuicTime::Delta::FromMilliseconds(400);

  // Arm and immediately cancel before running the simulation at all.
  alarm->Set(armed_deadline);
  alarm->Cancel();
  EXPECT_FALSE(alarm->IsSet());

  // Run well past the cancelled deadline.
  EXPECT_FALSE(simulator.RunUntil([&simulator, stop_at]() {
    return simulator.GetClock()->Now() >= stop_at;
  }));

  EXPECT_FALSE(alarm->IsSet());
  EXPECT_EQ(0u, fire_count);
}
// Setting an alarm whose deadline is already in the past must make it fire on
// the next simulator step.
TEST_F(SimulatorTest, AlarmInPast) {
  Simulator simulator;
  QuicAlarmFactory* alarm_factory = simulator.GetAlarmFactory();
  size_t alarm_counter = 0;
  std::unique_ptr<QuicAlarm> alarm(
      alarm_factory->CreateAlarm(new CounterDelegate(&alarm_counter)));
  const QuicTime start_time = simulator.GetClock()->Now();
  // Advance the clock so that |start_time| is now 400 ms in the past.
  simulator.RunFor(QuicTime::Delta::FromMilliseconds(400));
  alarm->Set(start_time);
  simulator.RunFor(QuicTime::Delta::FromMilliseconds(1));
  // The alarm fired immediately and exactly once.
  EXPECT_FALSE(alarm->IsSet());
  EXPECT_EQ(1u, alarm_counter);
}
// RunUntilOrTimeout returns true when the predicate becomes true within the
// budget and false when the budget expires first.
TEST_F(SimulatorTest, RunUntilOrTimeout) {
  Simulator simulator;
  // The counter ticks once per simulated second.
  Counter counter(&simulator, "counter", QuicTime::Delta::FromSeconds(1));

  // Reachable: a value of 10 arrives after ~10 s, inside the 20 s budget.
  bool reached = simulator.RunUntilOrTimeout(
      [&counter]() { return counter.get_value() == 10; },
      QuicTime::Delta::FromSeconds(20));
  ASSERT_TRUE(reached);

  // Unreachable: a value of 100 needs far longer than the remaining budget.
  reached = simulator.RunUntilOrTimeout(
      [&counter]() { return counter.get_value() == 100; },
      QuicTime::Delta::FromSeconds(20));
  ASSERT_FALSE(reached);
}
// RunFor advances the simulation by exactly the requested duration.
TEST_F(SimulatorTest, RunFor) {
  Simulator simulator;
  // Ticks every three seconds, so 100 seconds yields floor(100 / 3) = 33.
  Counter tick_counter(&simulator, "counter", QuicTime::Delta::FromSeconds(3));
  simulator.RunFor(QuicTime::Delta::FromSeconds(100));
  EXPECT_EQ(33, tick_counter.get_value());
}
// Mock packet filter whose FilterPacket() verdict is scripted by the test;
// returning false causes the filter to drop the packet.
class MockPacketFilter : public PacketFilter {
 public:
  MockPacketFilter(Simulator* simulator, std::string name, Endpoint* endpoint)
      : PacketFilter(simulator, name, endpoint) {}
  MOCK_METHOD(bool, FilterPacket, (const Packet&), (override));
};
// Installs a pass-all filter on the A->B direction and a drop-all filter on
// the B->A direction, and checks that only B receives traffic.
TEST_F(SimulatorTest, PacketFilter) {
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesPerSecond(1024 * 1024);
  const QuicTime::Delta base_propagation_delay =
      QuicTime::Delta::FromMilliseconds(5);
  Simulator simulator;
  LinkSaturator saturator_a(&simulator, "Saturator A", 1000, "Saturator B");
  LinkSaturator saturator_b(&simulator, "Saturator B", 1000, "Saturator A");
  Switch network_switch(&simulator, "Switch", 8,
                        bandwidth * base_propagation_delay * 10);
  // Each filter wraps one switch port and inspects traffic in one direction.
  StrictMock<MockPacketFilter> a_to_b_filter(&simulator, "A -> B filter",
                                             network_switch.port(1));
  StrictMock<MockPacketFilter> b_to_a_filter(&simulator, "B -> A filter",
                                             network_switch.port(2));
  SymmetricLink link_a(&a_to_b_filter, &saturator_b, bandwidth,
                       base_propagation_delay);
  SymmetricLink link_b(&b_to_a_filter, &saturator_a, bandwidth,
                       base_propagation_delay);
  // A's traffic is passed through; B's traffic is dropped entirely.
  EXPECT_CALL(a_to_b_filter, FilterPacket(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(b_to_a_filter, FilterPacket(_)).WillRepeatedly(Return(false));
  simulator.RunFor(QuicTime::Delta::FromSeconds(10));
  EXPECT_GE(saturator_b.counter()->packets(), 1u);
  EXPECT_EQ(saturator_a.counter()->packets(), 0u);
}
// Places a token-bucket policer in front of saturator 2's receive path and
// verifies that after the initial burst drains, only the policed fraction
// (target_bandwidth = bandwidth / 4) of saturator 1's traffic is delivered.
TEST_F(SimulatorTest, TrafficPolicer) {
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesPerSecond(1024 * 1024);
  const QuicTime::Delta base_propagation_delay =
      QuicTime::Delta::FromMilliseconds(5);
  const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
  Simulator simulator;
  LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
  LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 1");
  Switch network_switch(&simulator, "Switch", 8,
                        bandwidth * base_propagation_delay * 10);
  static const QuicByteCount initial_burst = 1000 * 10;
  static const QuicByteCount max_bucket_size = 1000 * 100;
  static const QuicBandwidth target_bandwidth = bandwidth * 0.25;
  TrafficPolicer policer(&simulator, "Policer", initial_burst, max_bucket_size,
                         target_bandwidth, network_switch.port(2));
  SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
                      base_propagation_delay);
  SymmetricLink link2(&saturator2, &policer, bandwidth, base_propagation_delay);
  // Phase 1: let saturator 1 emit exactly the initial burst allowance.
  bool simulator_result = simulator.RunUntilOrTimeout(
      [&saturator1]() {
        return saturator1.bytes_transmitted() == initial_burst;
      },
      timeout);
  ASSERT_TRUE(simulator_result);
  // Phase 2: with the sender paused, the full burst must reach saturator 2
  // unthrottled.
  saturator1.Pause();
  simulator_result = simulator.RunUntilOrTimeout(
      [&saturator2]() {
        return saturator2.counter()->bytes() == initial_burst;
      },
      timeout);
  ASSERT_TRUE(simulator_result);
  // Phase 3: resume saturating for ten seconds.
  saturator1.Resume();
  const QuicTime::Delta simulation_time = QuicTime::Delta::FromSeconds(10);
  simulator.RunFor(simulation_time);
  // Both endpoints transmit at full link rate...
  for (auto* saturator : {&saturator1, &saturator2}) {
    EXPECT_APPROX_EQ(bandwidth * simulation_time,
                     saturator->bytes_transmitted(), 0.01f);
  }
  // ...but only a quarter of saturator 1's traffic survives the policer,
  // while saturator 2's (unpoliced) traffic arrives in full.
  EXPECT_APPROX_EQ(saturator1.bytes_transmitted() / 4,
                   saturator2.counter()->bytes(), 0.1f);
  EXPECT_APPROX_EQ(saturator2.bytes_transmitted(),
                   saturator1.counter()->bytes(), 0.1f);
}
// Verifies token-bucket refill behavior: after an idle period long enough to
// fill the bucket, a full bucket's worth of traffic passes unthrottled, and
// sustained sending afterwards is again limited to the target rate.
TEST_F(SimulatorTest, TrafficPolicerBurst) {
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesPerSecond(1024 * 1024);
  const QuicTime::Delta base_propagation_delay =
      QuicTime::Delta::FromMilliseconds(5);
  const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
  Simulator simulator;
  LinkSaturator saturator1(&simulator, "Saturator 1", 1000, "Saturator 2");
  LinkSaturator saturator2(&simulator, "Saturator 2", 1000, "Saturator 1");
  Switch network_switch(&simulator, "Switch", 8,
                        bandwidth * base_propagation_delay * 10);
  const QuicByteCount initial_burst = 1000 * 10;
  const QuicByteCount max_bucket_size = 1000 * 100;
  const QuicBandwidth target_bandwidth = bandwidth * 0.25;
  TrafficPolicer policer(&simulator, "Policer", initial_burst, max_bucket_size,
                         target_bandwidth, network_switch.port(2));
  SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
                      base_propagation_delay);
  SymmetricLink link2(&saturator2, &policer, bandwidth, base_propagation_delay);
  // Warm up: make sure both directions have carried at least one packet.
  bool simulator_result = simulator.RunUntilOrTimeout(
      [&saturator1, &saturator2]() {
        return saturator1.packets_transmitted() > 0 &&
               saturator2.packets_transmitted() > 0;
      },
      timeout);
  ASSERT_TRUE(simulator_result);
  // Idle long enough for the policer's bucket to refill completely.
  saturator1.Pause();
  saturator2.Pause();
  simulator.RunFor(1.5f * target_bandwidth.TransferTime(max_bucket_size));
  // Send one bucket's worth at full line rate; it must all get through.
  saturator1.Resume();
  simulator.RunFor(bandwidth.TransferTime(max_bucket_size));
  saturator1.Pause();
  simulator.RunFor(2 * base_propagation_delay);
  EXPECT_APPROX_EQ(saturator1.bytes_transmitted(),
                   saturator2.counter()->bytes(), 0.1f);
  // With the bucket drained, sustained traffic is throttled to 1/4 again.
  saturator1.Resume();
  simulator.RunFor(QuicTime::Delta::FromSeconds(10));
  EXPECT_APPROX_EQ(saturator1.bytes_transmitted() / 4,
                   saturator2.counter()->bytes(), 0.1f);
}
// Verifies queue aggregation: a queue with an aggregation threshold holds
// packets until either the threshold worth of bytes accumulates or the
// aggregation timeout expires, and then releases them as a batch.
TEST_F(SimulatorTest, PacketAggregation) {
  const QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(1000);
  const QuicTime::Delta base_propagation_delay =
      QuicTime::Delta::FromMicroseconds(1);
  const QuicByteCount aggregation_threshold = 1000;
  const QuicTime::Delta aggregation_timeout = QuicTime::Delta::FromSeconds(30);
  Simulator simulator;
  // 10-byte packets, so the threshold corresponds to 100 packets.
  LinkSaturator saturator1(&simulator, "Saturator 1", 10, "Saturator 2");
  LinkSaturator saturator2(&simulator, "Saturator 2", 10, "Saturator 1");
  Switch network_switch(&simulator, "Switch", 8, 10 * aggregation_threshold);
  SymmetricLink link1(&saturator1, network_switch.port(1), bandwidth,
                      base_propagation_delay);
  SymmetricLink link2(&saturator2, network_switch.port(2), bandwidth,
                      2 * base_propagation_delay);
  // Aggregate traffic heading to saturator 2 at the full threshold; the
  // reverse direction uses a tiny threshold so it is effectively unbuffered.
  Queue* queue = network_switch.port_queue(2);
  queue->EnableAggregation(aggregation_threshold, aggregation_timeout);
  network_switch.port_queue(1)->EnableAggregation(5, aggregation_timeout);
  // Send 90% of a threshold: nothing may be released yet.
  simulator.RunFor(0.9 * bandwidth.TransferTime(aggregation_threshold));
  EXPECT_EQ(0u, saturator2.counter()->bytes());
  saturator1.Pause();
  saturator2.Pause();
  simulator.RunFor(QuicTime::Delta::FromSeconds(10));
  EXPECT_EQ(0u, saturator2.counter()->bytes());
  EXPECT_EQ(900u, queue->bytes_queued());
  EXPECT_EQ(910u, saturator1.counter()->bytes());
  // Crossing the threshold flushes exactly one aggregate of 1000 bytes.
  saturator1.Resume();
  simulator.RunFor(0.5 * bandwidth.TransferTime(aggregation_threshold));
  saturator1.Pause();
  simulator.RunFor(QuicTime::Delta::FromSeconds(10));
  EXPECT_EQ(1000u, saturator2.counter()->bytes());
  EXPECT_EQ(400u, queue->bytes_queued());
  // The sub-threshold remainder is released by the aggregation timeout.
  simulator.RunFor(aggregation_timeout);
  EXPECT_EQ(1400u, saturator2.counter()->bytes());
  EXPECT_EQ(0u, queue->bytes_queued());
  // Steady state: 5.5 thresholds sent => five full aggregates delivered and
  // half a threshold left queued until the next timeout.
  saturator1.Resume();
  simulator.RunFor(5.5 * bandwidth.TransferTime(aggregation_threshold));
  saturator1.Pause();
  simulator.RunFor(QuicTime::Delta::FromSeconds(10));
  EXPECT_EQ(6400u, saturator2.counter()->bytes());
  EXPECT_EQ(500u, queue->bytes_queued());
  simulator.RunFor(aggregation_timeout);
  EXPECT_EQ(6900u, saturator2.counter()->bytes());
  EXPECT_EQ(0u, queue->bytes_queued());
}
}
} | QuicAlarmFactory* Simulator::GetAlarmFactory() { return &alarm_factory_; } | |
#include "tensorflow/core/grappler/utils/pattern_utils.h"
#include <algorithm>
#include <memory>
#include "absl/container/flat_hash_set.h"
namespace tensorflow {
namespace grappler {
namespace utils {
// Returns true iff any of the '|'-separated alternatives in |op| names an op
// that is commutative in its two inputs (so pattern children may be matched
// in swapped order).
const bool IsCommutativeOp(const string& op) {
  static const auto* commutative_ops = new absl::flat_hash_set<string>(
      {"Add", "AddV2", "Mul", "Maximum", "SquaredDifference"});
  for (const string& candidate : str_util::Split(op, '|')) {
    if (commutative_ops->contains(candidate)) return true;
  }
  return false;
}
// Returns true if |op2| matches |op1|, where |op1| is either the wildcard
// "*" (matches anything) or a '|'-separated list of acceptable op names.
bool IsSame(string op1, string op2) {
  if (op1 == "*") return true;
  for (const string& alternative : str_util::Split(op1, '|')) {
    if (alternative == op2) return true;
  }
  return false;
}
// Recursively checks whether the subgraph reached by following the regular
// inputs of |node_view| matches |pattern|.  On the match path, visited nodes
// are recorded in node_label_to_index_ / matched_node_indices_ (and in
// remove_node_indices_ for kRemove-status pattern nodes), and |match| is
// filled with the matched node-view tree.
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
    const OpTypePattern& pattern, MutableNodeView* node_view,
    NodeViewMatch* match) {
  // Nodes carrying control edges may only match if they survive the rewrite:
  // incoming control edges require kRemain; outgoing ones forbid kRemove.
  if ((node_view->NumControllingFanins() > 0 &&
       pattern.node_status != NodeStatus::kRemain) ||
      (node_view->NumControlledFanouts() > 0 &&
       pattern.node_status == NodeStatus::kRemove))
    return false;
  // The op matches if the pattern is the wildcard "*" or the node's op type
  // appears among the '|'-separated alternatives.
  bool op_type_matched = false;
  if (pattern.op == "*") {
    op_type_matched = true;
  } else {
    std::vector<string> op_list = str_util::Split(pattern.op, '|');
    for (const string& op : op_list) {
      if (node_view->node()->op() == op) {
        op_type_matched = true;
        break;
      }
    }
  }
  if (op_type_matched) {
    // First occurrence of a label binds it to this node; later occurrences
    // must resolve to the same node (this is what lets DAG patterns name the
    // same node under several parents).
    if (node_label_to_index_.find(pattern.label) ==
        node_label_to_index_.end()) {
      node_label_to_index_[pattern.label] = node_view->node_index();
      matched_node_indices_.insert(node_view->node_index());
      if (pattern.node_status == NodeStatus::kRemove) {
        remove_node_indices_.insert(node_view->node_index());
      }
    } else if (node_label_to_index_[pattern.label] != node_view->node_index()) {
      return false;  // Label is already bound to a different node.
    } else {
      DCHECK(node_label_to_index_[pattern.label] == node_view->node_index());
    }
  } else {
    return false;
  }
  match->node_view = node_view;
  // Recurse into the children along the node's regular (data) inputs.
  if (!pattern.children.empty()) {
    auto graph_children = node_view->GetRegularFanins();
    int num_children = graph_children.size();
    if (num_children != pattern.children.size()) {
      return false;
    } else {
      // Pattern children are matched positionally against graph fanins...
      std::vector<int> pattern_child_indices(num_children);
      std::iota(pattern_child_indices.begin(), pattern_child_indices.end(), 0);
      string op_name = pattern.op;
      // ...except for two-input commutative ops, where the two pattern
      // children are swapped if that is the only orientation in which their
      // op types line up with the actual fanins.
      if (IsCommutativeOp(op_name) && num_children == 2) {
        MutableNodeView* graph_child0_node_view =
            graph_view_->GetNode(graph_children[0].node_index());
        MutableNodeView* graph_child1_node_view =
            graph_view_->GetNode(graph_children[1].node_index());
        if ((!IsSame(pattern.children[0].op, graph_child0_node_view->GetOp()) &&
             IsSame(pattern.children[1].op, graph_child0_node_view->GetOp())) ||
            (!IsSame(pattern.children[1].op, graph_child1_node_view->GetOp()) &&
             IsSame(pattern.children[0].op, graph_child1_node_view->GetOp())))
          std::swap(pattern_child_indices[0], pattern_child_indices[1]);
      }
      for (int i = 0; i < num_children; ++i) {
        auto child_node_index = graph_children[i].node_index();
        MutableNodeView* child_node_view =
            graph_view_->GetNode(child_node_index);
        const OpTypePattern& child_pattern =
            pattern.children[pattern_child_indices[i]];
        match->children.push_back(NodeViewMatch());
        NodeViewMatch* child_match = &(match->children.back());
        // Any failing child aborts the whole match.
        if (!DoesOpTypePatternMatch(child_pattern, child_node_view,
                                    child_match)) {
          return false;
        }
      }
    }
  }
  return true;
}
// Entry point of the matcher: tries to match |pattern| rooted at |node_view|.
// On success, fills |matched_nodes_map| (pattern label -> node index) and
// |remove_node_indices| (indices of nodes slated for removal) and returns
// true.  All per-call matcher state is reset before returning, so the matcher
// object can be reused.
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::GetMatchedNodes(
    const OpTypePattern& pattern,
    const std::unordered_set<string>& nodes_to_preserve,
    MutableNodeView* node_view, std::map<string, int>* matched_nodes_map,
    std::set<int>* remove_node_indices) {
  match_ = std::make_unique<NodeViewMatch>();
  // A structural match is only accepted if removing its kRemove nodes is safe
  // with respect to |nodes_to_preserve|.
  const bool found_match =
      DoesOpTypePatternMatch(pattern, node_view, match_.get()) &&
      IsSafeNodesToRemove(nodes_to_preserve);
  if (found_match) {
    *matched_nodes_map = this->node_label_to_index_;
    *remove_node_indices = this->remove_node_indices_;
  }
  // Clear all state accumulated during this call.
  match_->Clear();
  match_.reset(nullptr);
  matched_node_indices_.clear();
  node_label_to_index_.clear();
  remove_node_indices_.clear();
  return found_match;
}
}
}
} | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::ops::Placeholder;
// Builds a MatMul -> BiasAdd -> GELU(erf-based) graph into |graph|.  When
// |add_external_dependent| is true, an extra Identity consumer is attached to
// the bias_add node so that fusion of the pattern becomes unsafe.
void GetMatMulBiasAddGeluGraph(GraphDef* graph,
                               bool add_external_dependent = false) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto input_shape = ops::Placeholder::Shape({8, 32});
  auto weight_shape = ops::Placeholder::Shape({32, 64});
  auto bias_shape = ops::Placeholder::Shape({64});
  auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
  auto weight = Placeholder(s.WithOpName("weight"), DT_FLOAT, weight_shape);
  auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
  auto matmul = ops::MatMul(s.WithOpName("matmul"), input, weight);
  auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
  if (add_external_dependent) {
    // Extra consumer of bias_add outside the GELU subgraph.
    auto external_dependent =
        ops::Identity(s.WithOpName("external_dependent"), bias_add);
  }
  // GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))), built from primitive ops.
  auto one_over_square_root_two =
      ops::Const(s.WithOpName("one_over_square_root_two"), {0.707f}, {});
  auto bias_add_times_const = ops::Mul(s.WithOpName("bias_add_times_const"),
                                       bias_add, one_over_square_root_two);
  auto erf = ops::Erf(s.WithOpName("erf"), bias_add_times_const);
  auto one = ops::Const(s.WithOpName("one"), {1.0f}, {});
  auto erf_plus_one = ops::AddV2(s.WithOpName("erf_plus_one"), erf, one);
  auto one_half = ops::Const(s.WithOpName("one_half"), {0.5f}, {});
  auto one_half_times_erf_plus_one = ops::Mul(
      s.WithOpName("one_half_times_erf_plus_one"), one_half, erf_plus_one);
  auto gelu =
      ops::Mul(s.WithOpName("gelu"), one_half_times_erf_plus_one, bias_add);
  auto fetch = ops::Identity(s.WithOpName("fetch"), gelu);
  TF_ASSERT_OK(s.ToGraphDef(graph));
}
// Returns the OpTypePattern corresponding to the graph produced by
// GetMatMulBiasAddGeluGraph: the erf-based GELU expression rooted at the
// final Mul.  Note that "my_bias_add" is labeled twice, which requires both
// occurrences to bind to the same graph node (DAG matching).
OpTypePattern GetMatMulBiasAddGeluPattern() {
  OpTypePattern pattern_syntax{"Mul", "my_gelu", NodeStatus::kReplace,
    {
      {"Mul", "my_one_half_times_erf_plus_one", NodeStatus::kRemove,
        {
          {"Const", "my_one_half", NodeStatus::kRemain},
          {"AddV2", "my_erf_plus_one", NodeStatus::kRemove,
            {
              {"Erf", "my_erf", NodeStatus::kRemove,
                {
                  {"Mul", "my_bias_add_times_const", NodeStatus::kRemove,
                    {
                      {"BiasAdd", "my_bias_add", NodeStatus::kRemove},
                      {"Const", "my_one_over_square_root_two", NodeStatus::kRemain}
                    }
                  }
                }
              },
              {"Const", "my_one", NodeStatus::kRemain}
            }
          }
        }
      },
      {"BiasAdd", "my_bias_add", NodeStatus::kRemove,
        {
          {"MatMul", "my_matmul", NodeStatus::kRemove},
          {"*", "my_bias", NodeStatus::kRemain}
        }
      }
    }
  };
  return pattern_syntax;
}
// Test fixture providing a helper that materializes a small GraphDef from a
// compact (name, op, inputs) description.
class PatternMatcherTest : public ::testing::Test {
 protected:
  // Compact description of a single NodeDef.
  struct NodeConfig {
    NodeConfig(string name, string op, std::vector<string> inputs)
        : name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
    string name;
    string op;
    std::vector<string> inputs;
  };

  // Builds a GraphDef containing one NodeDef per entry of |nodes|.
  static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
    GraphDef graph;
    for (const NodeConfig& config : nodes) {
      NodeDef node;
      node.set_name(config.name);
      node.set_op(config.op);
      for (const string& input : config.inputs) {
        node.add_input(input);
      }
      *graph.add_node() = std::move(node);
    }
    return graph;
  }
};
// Matches a simple tree-shaped pattern and verifies that each matched label
// resolves to the index of the correspondingly named graph node.
TEST_F(PatternMatcherTest, Tree) {
  ::tensorflow::Status status;
  GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
    {
      {"C", "my_c", NodeStatus::kRemove},
      {"D", "my_d", NodeStatus::kRemove}
    }
  };
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("e");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // Each label "my_<name>" must map to the node named "<name>".
  // BUG FIX: the loop previously terminated at begin() instead of end(), so
  // the body never executed and the check was vacuous.
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
}
// Matches a DAG-shaped pattern in which the label "my_b" appears under two
// different parents and must bind to the same graph node.  Also verifies
// that the match is rejected when nodes marked kRemove are in the preserve
// set.
TEST_F(PatternMatcherTest, DAG) {
  ::tensorflow::Status status;
  GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {"b"}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
    {
      {"C", "my_c", NodeStatus::kRemove,
        {
          {"B", "my_b", NodeStatus::kRemove}
        }
      },
      {"D", "my_d", NodeStatus::kRemove,
        {
          {"B", "my_b", NodeStatus::kRemove}
        }
      }
    }
  };
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("e");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  // A preserve set that does not intersect the pattern must not block it.
  std::unordered_set<string> nodes_to_preserve = {"foo"};
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match =
      graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
                                    &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // Each label "my_<name>" must map to the node named "<name>".
  // BUG FIX: the loop previously terminated at begin() instead of end(), so
  // the body never executed and the check was vacuous.
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
  // Preserving nodes the pattern wants to remove must reject the match.
  nodes_to_preserve.insert({"c", "d"});
  matched_nodes_map.clear();
  remove_node_indices.clear();
  found_match =
      graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
                                    &matched_nodes_map, &remove_node_indices);
  EXPECT_FALSE(found_match);
  EXPECT_TRUE(matched_nodes_map.empty());
  EXPECT_TRUE(remove_node_indices.empty());
}
// Same DAG pattern as above, but node "d" (marked kRemove in the pattern) has
// an extra consumer "f" outside the pattern.  Removing "d" would break "f",
// so the match must be rejected.
TEST_F(PatternMatcherTest, DAGExternalDependent) {
  ::tensorflow::Status status;
  GraphDef graph = CreateGraph({{"f", "F", {"d"}},
                                {"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {"b"}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
                        {
                          {"C", "my_c", NodeStatus::kRemove,
                            {
                              {"B", "my_b", NodeStatus::kRemove}
                            }
                          },
                          {"D", "my_d", NodeStatus::kRemove,
                            {
                              {"B", "my_b", NodeStatus::kRemove}
                            }
                          }
                        }
                       };
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("e");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  // The external dependent makes removal unsafe: no match is reported.
  EXPECT_FALSE(found_match);
  EXPECT_TRUE(matched_nodes_map.empty());
  EXPECT_TRUE(remove_node_indices.empty());
}
// End-to-end match of the erf-based GELU pattern against the graph built by
// GetMatMulBiasAddGeluGraph, verifying each label binds to the right node.
TEST_F(PatternMatcherTest, MatMulBiasAddGelu) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph);
  OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("gelu");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // Each label "my_<name>" must map to the node named "<name>".
  // BUG FIX: the loop previously terminated at begin() instead of end(), so
  // the body never executed and the check was vacuous.
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
}
// With an external Identity consumer attached to bias_add (a node the pattern
// marks kRemove), the GELU pattern must not match.
TEST_F(PatternMatcherTest, MatMulBiasAddGeluExternalDependent) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph, true);
  OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("gelu");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_FALSE(found_match);
  EXPECT_TRUE(matched_nodes_map.empty());
  EXPECT_TRUE(remove_node_indices.empty());
}
// Matches the GELU pattern and then applies a graph mutation: adds a fused
// _FusedMatMul node reusing the root's name and removes all kRemove-matched
// nodes, verifying the final node count and node existence.
TEST_F(PatternMatcherTest, MatMulBiasAddGeluMutation) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph);
  OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("gelu");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  int num_nodes_before = graph_view.NumNodes();
  // Remember names of the to-be-removed nodes before mutation invalidates
  // the indices.
  std::vector<string> remove_node_names;
  for (auto const& node_idx : remove_node_indices) {
    remove_node_names.push_back(graph_view.GetNode(node_idx)->GetName());
  }
  Mutation* mutation = graph_view.GetMutationBuilder();
  // The fused node takes over the root's name ("gelu") so downstream
  // consumers keep resolving; inputs are taken from matmul and bias_add.
  NodeDef fused_node;
  fused_node.set_name("gelu");
  fused_node.set_op("_FusedMatMul");
  fused_node.add_input(graph_view.GetNode("matmul")->node()->input(0));
  fused_node.add_input(graph_view.GetNode("matmul")->node()->input(1));
  fused_node.add_input(graph_view.GetNode("bias_add")->node()->input(1));
  mutation->AddNode(std::move(fused_node), &status);
  TF_ASSERT_OK(status);
  // First Apply adds the fused node; second Apply performs the removals.
  TF_EXPECT_OK(mutation->Apply());
  for (auto const& node_idx : remove_node_indices) {
    mutation->RemoveNode(graph_view.GetNode(node_idx));
  }
  TF_EXPECT_OK(mutation->Apply());
  // The replaced root keeps its name, so only the kRemove nodes disappear.
  int num_nodes_after = graph_view.NumNodes();
  EXPECT_EQ(num_nodes_before - remove_node_indices.size(), num_nodes_after);
  bool remove_nodes_deleted = true;
  for (auto const& node_name : remove_node_names) {
    if (graph_view.GetNode(node_name) != nullptr) {
      remove_nodes_deleted = false;
      break;
    }
  }
  EXPECT_TRUE(remove_nodes_deleted);
  bool replace_node_exist = graph_view.HasNode("gelu") ? true : false;
  EXPECT_TRUE(replace_node_exist);
}
// For commutative ops ("Mul", "Add", "AddV2") the matcher may swap the two
// pattern children to line up with the graph fanins, so the match must
// succeed regardless of the operand order.
TEST_F(PatternMatcherTest, CommutativeInputs) {
  ::tensorflow::Status status;
  std::vector<string> commutative_ops = {"Mul", "Add", "AddV2"};
  for (string op : commutative_ops) {
    for (bool should_swap : {false, true}) {
      std::vector<string> commutative_operands =
          (should_swap ? std::vector<string>{"d", "c"}
                       : std::vector<string>{"c", "d"});
      GraphDef graph = CreateGraph({{"e", op, commutative_operands},
                                    {"c", "C", {"b"}},
                                    {"d", "D", {"b"}},
                                    {"b", "B", {"a"}},
                                    {"a", "A", {}}});
      OpTypePattern pattern{op, "my_e", NodeStatus::kReplace,
                            {
                              {"C", "my_c", NodeStatus::kRemove,
                               {
                                 {"B", "my_b", NodeStatus::kRemove}
                               }
                              },
                              {"D", "my_d", NodeStatus::kRemove,
                               {
                                 {"B", "my_b", NodeStatus::kRemove}
                               }
                              }
                            }
      };
      MutableGraphView graph_view(&graph, &status);
      TF_ASSERT_OK(status);
      TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
      auto root_node_view = graph_view.GetNode("e");
      SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
          &graph_view);
      std::map<string, int> matched_nodes_map;
      std::set<int> remove_node_indices;
      bool found_match = graph_matcher.GetMatchedNodes(
          pattern, {}, root_node_view, &matched_nodes_map,
          &remove_node_indices);
      EXPECT_TRUE(found_match);
      EXPECT_FALSE(matched_nodes_map.empty());
      EXPECT_FALSE(remove_node_indices.empty());
      // BUG FIX: the loop previously compared the iterator against begin()
      // instead of end(), so it never executed and the expectation below was
      // vacuously true.
      bool all_indices_matched = true;
      for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
           it++) {
        auto label = str_util::StripPrefix(it->first, "my_");
        int matched_node_idx = it->second;
        int expected_node_idx = graph_view.GetNode(label)->node_index();
        if (matched_node_idx != expected_node_idx) {
          all_indices_matched = false;
          break;
        }
      }
      EXPECT_TRUE(all_indices_matched);
    }
  }
}
}
}
}
} | template <>
// Specialization for input-following matching: returns true iff the subgraph
// rooted at `node_view` (walking regular fanins) matches `pattern`. On
// success, label->node bindings are recorded in node_label_to_index_,
// matched nodes in matched_node_indices_, and kRemove-labelled nodes in
// remove_node_indices_. `match` receives the matched node-view tree.
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
    const OpTypePattern& pattern, MutableNodeView* node_view,
    NodeViewMatch* match) {
  // Nodes involved in control dependencies cannot be safely rewritten: a
  // node with controlling fanins may only match a kRemain pattern node, and
  // a node with controlled fanouts may not match a kRemove pattern node.
  if ((node_view->NumControllingFanins() > 0 &&
       pattern.node_status != NodeStatus::kRemain) ||
      (node_view->NumControlledFanouts() > 0 &&
       pattern.node_status == NodeStatus::kRemove))
    return false;
  // pattern.op is either the wildcard "*" or a '|'-separated list of
  // acceptable op names.
  bool op_type_matched = false;
  if (pattern.op == "*") {
    op_type_matched = true;
  } else {
    std::vector<string> op_list = str_util::Split(pattern.op, '|');
    for (const string& op : op_list) {
      if (node_view->node()->op() == op) {
        op_type_matched = true;
        break;
      }
    }
  }
  if (op_type_matched) {
    // A pattern label must bind to exactly one graph node. The first time a
    // label is seen, bind it to this node; a label already bound to a
    // different node makes the candidate inconsistent. (Re-binding the same
    // node is what lets DAG-shaped patterns share a node across branches.)
    if (node_label_to_index_.find(pattern.label) ==
        node_label_to_index_.end()) {
      node_label_to_index_[pattern.label] = node_view->node_index();
      matched_node_indices_.insert(node_view->node_index());
      if (pattern.node_status == NodeStatus::kRemove) {
        remove_node_indices_.insert(node_view->node_index());
      }
    } else if (node_label_to_index_[pattern.label] != node_view->node_index()) {
      return false;  // Label already bound to a different node.
    } else {
      DCHECK(node_label_to_index_[pattern.label] == node_view->node_index());
    }
  } else {
    return false;
  }
  match->node_view = node_view;
  // Recurse into the children, matching pattern children against the node's
  // regular (data) fanins one-to-one.
  if (!pattern.children.empty()) {
    auto graph_children = node_view->GetRegularFanins();
    int num_children = graph_children.size();
    if (num_children != pattern.children.size()) {
      return false;
    } else {
      // Pattern children are matched positionally by default.
      std::vector<int> pattern_child_indices(num_children);
      std::iota(pattern_child_indices.begin(), pattern_child_indices.end(), 0);
      // For a commutative binary op, swap the two pattern children when that
      // makes the pattern op types line up with the graph fanins.
      string op_name = pattern.op;
      if (IsCommutativeOp(op_name) && num_children == 2) {
        MutableNodeView* graph_child0_node_view =
            graph_view_->GetNode(graph_children[0].node_index());
        MutableNodeView* graph_child1_node_view =
            graph_view_->GetNode(graph_children[1].node_index());
        if ((!IsSame(pattern.children[0].op, graph_child0_node_view->GetOp()) &&
             IsSame(pattern.children[1].op, graph_child0_node_view->GetOp())) ||
            (!IsSame(pattern.children[1].op, graph_child1_node_view->GetOp()) &&
             IsSame(pattern.children[0].op, graph_child1_node_view->GetOp())))
          std::swap(pattern_child_indices[0], pattern_child_indices[1]);
      }
      for (int i = 0; i < num_children; ++i) {
        auto child_node_index = graph_children[i].node_index();
        MutableNodeView* child_node_view =
            graph_view_->GetNode(child_node_index);
        const OpTypePattern& child_pattern =
            pattern.children[pattern_child_indices[i]];
        match->children.push_back(NodeViewMatch());
        NodeViewMatch* child_match = &(match->children.back());
        // Any failing child aborts the whole match.
        if (!DoesOpTypePatternMatch(child_pattern, child_node_view,
                                    child_match)) {
          return false;
        }
      }
    }
  }
  return true;
} | TEST_F(PatternMatcherTest, Tree) {
::tensorflow::Status status;
  // Simple tree-shaped pattern: E with children C and D.
  GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
                        {
                          {"C", "my_c", NodeStatus::kRemove},
                          {"D", "my_d", NodeStatus::kRemove}
                        }
  };
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("e");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // BUG FIX: iterate to end() (was begin(), so the loop never ran and the
  // final expectation was vacuously true).
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
}
// DAG-shaped pattern: both branches (via "my_c" and "my_d") share the same
// "my_b" label, which must bind to the single graph node "b". Also verifies
// that listing a kRemove node in nodes_to_preserve rejects the match.
TEST_F(PatternMatcherTest, DAG) {
  ::tensorflow::Status status;
  GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {"b"}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
                        {
                          {"C", "my_c", NodeStatus::kRemove,
                           {
                             {"B", "my_b", NodeStatus::kRemove}
                           }
                          },
                          {"D", "my_d", NodeStatus::kRemove,
                           {
                             {"B", "my_b", NodeStatus::kRemove}
                           }
                          }
                        }
  };
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("e");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  // "foo" is not in the graph, so this preserve set is a no-op.
  std::unordered_set<string> nodes_to_preserve = {"foo"};
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match =
      graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
                                    &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // BUG FIX: the loop previously compared the iterator against begin()
  // instead of end(), so it never executed and the check was vacuous.
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
  // Preserving "c" and "d" (both kRemove in the pattern) must kill the match.
  nodes_to_preserve.insert({"c", "d"});
  matched_nodes_map.clear();
  remove_node_indices.clear();
  found_match =
      graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
                                    &matched_nodes_map, &remove_node_indices);
  EXPECT_FALSE(found_match);
  EXPECT_TRUE(matched_nodes_map.empty());
  EXPECT_TRUE(remove_node_indices.empty());
}
// The matcher must reject a candidate when a node marked kRemove ("d") has a
// consumer outside the pattern ("f"): removing it would break that user.
TEST_F(PatternMatcherTest, DAGExternalDependent) {
  ::tensorflow::Status status;
  GraphDef graph = CreateGraph({{"f", "F", {"d"}},
                                {"e", "E", {"c", "d"}},
                                {"c", "C", {"b"}},
                                {"d", "D", {"b"}},
                                {"b", "B", {"a"}},
                                {"a", "A", {}}});
  OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
                        {
                          {"C", "my_c", NodeStatus::kRemove,
                           {{"B", "my_b", NodeStatus::kRemove}}},
                          {"D", "my_d", NodeStatus::kRemove,
                           {{"B", "my_b", NodeStatus::kRemove}}}
                        }};
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  SubGraphMatcher<MatchingDirection::kFollowInputs> matcher(&graph_view);
  std::map<string, int> node_map;
  std::set<int> removable_indices;
  const bool matched =
      matcher.GetMatchedNodes(pattern, {}, graph_view.GetNode("e"), &node_map,
                              &removable_indices);
  EXPECT_FALSE(matched);
  EXPECT_TRUE(node_map.empty());
  EXPECT_TRUE(removable_indices.empty());
}
// The canonical MatMul+BiasAdd+Gelu pattern must match the reference graph
// and bind every label to the graph node of the same (un-prefixed) name.
TEST_F(PatternMatcherTest, MatMulBiasAddGelu) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph);
  OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("gelu");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // BUG FIX: iterate to end() (was begin(), so the loop never ran and the
  // final expectation was vacuously true).
  bool all_indices_matched = true;
  for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
       it++) {
    auto label = str_util::StripPrefix(it->first, "my_");
    int matched_node_idx = it->second;
    int expected_node_idx = graph_view.GetNode(label)->node_index();
    if (matched_node_idx != expected_node_idx) {
      all_indices_matched = false;
      break;
    }
  }
  EXPECT_TRUE(all_indices_matched);
}
// When the graph is built with an external consumer of an internal node
// (second argument of GetMatMulBiasAddGeluGraph set to true), the
// MatMul+BiasAdd+Gelu pattern must not match.
TEST_F(PatternMatcherTest, MatMulBiasAddGeluExternalDependent) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph, true);
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  const OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  SubGraphMatcher<MatchingDirection::kFollowInputs> matcher(&graph_view);
  std::map<string, int> node_map;
  std::set<int> removable_indices;
  const bool matched =
      matcher.GetMatchedNodes(pattern, {}, graph_view.GetNode("gelu"),
                              &node_map, &removable_indices);
  EXPECT_FALSE(matched);
  EXPECT_TRUE(node_map.empty());
  EXPECT_TRUE(removable_indices.empty());
}
// Verifies that a matched MatMul+BiasAdd+Gelu subgraph can be rewritten via
// the Mutation API: a single _FusedMatMul replaces the root, and every node
// labelled kRemove is deleted from the graph.
TEST_F(PatternMatcherTest, MatMulBiasAddGeluMutation) {
  ::tensorflow::Status status;
  GraphDef graph;
  GetMatMulBiasAddGeluGraph(&graph);
  OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
  MutableGraphView graph_view(&graph, &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
  auto root_node_view = graph_view.GetNode("gelu");
  SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
  std::map<string, int> matched_nodes_map;
  std::set<int> remove_node_indices;
  bool found_match = graph_matcher.GetMatchedNodes(
      pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
  EXPECT_TRUE(found_match);
  EXPECT_FALSE(matched_nodes_map.empty());
  EXPECT_FALSE(remove_node_indices.empty());
  // Capture the names of the nodes to be removed up front so they can still
  // be looked up after the graph has been mutated.
  int num_nodes_before = graph_view.NumNodes();
  std::vector<string> remove_node_names;
  for (auto const& node_idx : remove_node_indices) {
    remove_node_names.push_back(graph_view.GetNode(node_idx)->GetName());
  }
  Mutation* mutation = graph_view.GetMutationBuilder();
  // The fused node reuses the root's name ("gelu") so downstream consumers
  // keep resolving to it.
  NodeDef fused_node;
  fused_node.set_name("gelu");
  fused_node.set_op("_FusedMatMul");
  fused_node.add_input(graph_view.GetNode("matmul")->node()->input(0));
  fused_node.add_input(graph_view.GetNode("matmul")->node()->input(1));
  fused_node.add_input(graph_view.GetNode("bias_add")->node()->input(1));
  mutation->AddNode(std::move(fused_node), &status);
  TF_ASSERT_OK(status);
  TF_EXPECT_OK(mutation->Apply());
  for (auto const& node_idx : remove_node_indices) {
    mutation->RemoveNode(graph_view.GetNode(node_idx));
  }
  TF_EXPECT_OK(mutation->Apply());
  // The graph must have shrunk by exactly the number of removed nodes.
  // (Fixed: cast to int to avoid a signed/unsigned comparison.)
  int num_nodes_after = graph_view.NumNodes();
  EXPECT_EQ(num_nodes_before - static_cast<int>(remove_node_indices.size()),
            num_nodes_after);
  bool remove_nodes_deleted = true;
  for (auto const& node_name : remove_node_names) {
    if (graph_view.GetNode(node_name) != nullptr) {
      remove_nodes_deleted = false;
      break;
    }
  }
  EXPECT_TRUE(remove_nodes_deleted);
  // Fixed: `HasNode(...) ? true : false` was a redundant ternary.
  bool replace_node_exist = graph_view.HasNode("gelu");
  EXPECT_TRUE(replace_node_exist);
}
#include "xla/pjrt/distributed/topology_util.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/utils.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
static constexpr char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
// Reads /proc/sys/kernel/random/boot_id and returns its contents with each
// line's leading/trailing ASCII whitespace stripped. On non-Linux platforms
// an empty string is returned; on Linux a missing file yields NotFound.
absl::StatusOr<std::string> GetBootIdString() {
  std::string boot_id;
#ifdef __linux__
  std::ifstream file(kBootIdPath);
  if (!file) {
    return NotFound("%s not found.", kBootIdPath);
  }
  for (std::string line; std::getline(file, line);) {
    absl::StripAsciiWhitespace(&line);
    boot_id.append(line);
  }
#endif
  return boot_id;
}
// KV-store key under which node `node_id` publishes its local topology.
static std::string GetLocalTopologyKey(std::string_view platform, int node_id) {
  std::string key = "local_topology/";
  key.append(platform);
  key += '/';
  key += std::to_string(node_id);
  return key;
}
// KV-store key under which node 0 publishes the merged global topology.
static std::string GetGlobalTopologyKey(std::string_view platform) {
  std::string key = "global_topology/";
  key.append(platform);
  return key;
}
// Fetches the LocalTopologyProto published by each of the `num_nodes`
// participants (under "local_topology/<platform>/<node_id>") from the KV
// store. Returns all of them on success, or InternalError aggregating up to
// the first ~10 per-node failures.
static absl::StatusOr<std::vector<LocalTopologyProto>> GetAllLocalTopologies(
    std::string_view platform, int num_nodes, KeyValueStoreInterface* kv_store,
    absl::Duration timeout) {
  std::vector<absl::StatusOr<std::string>> local_topology_strs(num_nodes);
  // One blocking KV Get per node, issued concurrently on a thread pool; the
  // blocking counter waits for all of them to finish.
  tsl::thread::ThreadPool thread_pool(
      tsl::Env::Default(), "GetAllLocalTopologies", DefaultThreadPoolSize());
  absl::BlockingCounter blocking_counter(num_nodes);
  absl::Mutex mu;
  for (int i = 0; i < num_nodes; i++) {
    thread_pool.Schedule([&, i] {
      absl::StatusOr<std::string> local_topology_str =
          kv_store->Get(GetLocalTopologyKey(platform, i), timeout);
      {
        // Each task writes a distinct slot; the mutex conservatively
        // serializes the writes.
        absl::MutexLock lock(&mu);
        local_topology_strs[i] = local_topology_str;
      }
      blocking_counter.DecrementCount();
    });
  }
  blocking_counter.Wait();
  std::vector<std::string> error_messages;
  std::vector<LocalTopologyProto> local_topologies;
  // Cap the number of failure messages included in the aggregated error.
  int max_num_failed_message = 10;
  int failed_count = 0;
  for (const absl::StatusOr<std::string>& str : local_topology_strs) {
    if (str.ok()) {
      LocalTopologyProto local;
      // NOTE(review): ParseFromString's return value is ignored — a corrupt
      // entry silently yields a default-initialized proto; consider checking.
      local.ParseFromString(*str);
      local_topologies.push_back(local);
    } else {
      error_messages.push_back(
          absl::StrCat("Error ", ++failed_count, ": ", str.status().message()));
      if (failed_count > max_num_failed_message) {
        break;
      }
    }
  }
  if (error_messages.empty()) {
    return local_topologies;
  }
  return absl::InternalError(
      absl::StrCat("Getting local topologies failed: ",
                   absl::StrJoin(error_messages, "\n\n")));
}
// Merges the per-host topologies into a single global topology. Hosts that
// report the same boot_id are placed in the same slice; when
// `assign_global_device_ids` is set, every device also receives a unique,
// monotonically increasing global device id. The input protos are consumed
// (swapped into the result).
GlobalTopologyProto BuildGlobalTopology(
    absl::Span<LocalTopologyProto> local_topologies,
    bool assign_global_device_ids) {
  GlobalTopologyProto result;
  absl::flat_hash_map<std::string, int> slice_index_by_boot_id;
  int device_counter = 0;
  for (LocalTopologyProto& host : local_topologies) {
    // The first host seen with a given boot_id allocates the next slice
    // index; the map size before insertion is exactly that index.
    const auto [entry, was_inserted] = slice_index_by_boot_id.try_emplace(
        host.boot_id(), static_cast<int>(slice_index_by_boot_id.size()));
    (void)was_inserted;
    for (DeviceProto& device : *host.mutable_devices()) {
      if (assign_global_device_ids) {
        device.set_global_device_id(device_counter++);
      }
      device.set_slice_index(entry->second);
    }
    result.add_nodes()->Swap(&host);
  }
  if (VLOG_IS_ON(10)) {
    for (const auto& [boot_id, slice_index] : slice_index_by_boot_id) {
      LOG(INFO) << "BuildGlobalTopology boot_id_to_slice_index " << boot_id
                << "->" << slice_index;
    }
  }
  return result;
}
// Exchanges topology information across `num_nodes` processes through
// `kv_store`: every node publishes its local topology, node 0 gathers all of
// them, builds the global topology and publishes it, and the remaining nodes
// block until they can read it back. On success `*global_topology` is filled
// in on every node.
absl::Status ExchangeTopologies(std::string_view platform, int node_id,
                                int num_nodes,
                                absl::Duration get_local_topology_timeout,
                                absl::Duration get_global_topology_timeout,
                                KeyValueStoreInterface* kv_store,
                                const LocalTopologyProto& local_topology,
                                GlobalTopologyProto* global_topology,
                                bool assign_global_device_ids) {
  VLOG(3) << "Local Topology for platform" << platform << ":\n"
          << local_topology.DebugString();
  // Fast path: a single node needs no coordination; global device ids are
  // just the local ordinals. NOTE(review): slice_index is not set on this
  // path — confirm downstream consumers tolerate the default.
  if (num_nodes == 1) {
    LocalTopologyProto* topology = global_topology->add_nodes();
    *topology = local_topology;
    for (DeviceProto& device : *topology->mutable_devices()) {
      device.set_global_device_id(device.local_device_ordinal());
    }
    return absl::OkStatus();
  }
  CHECK(kv_store != nullptr);
  // Publish this node's local topology for node 0 to pick up.
  TF_RETURN_IF_ERROR(kv_store->Set(GetLocalTopologyKey(platform, node_id),
                                   local_topology.SerializeAsString()));
  std::string global_topology_key = GetGlobalTopologyKey(platform);
  if (node_id == 0) {
    // Node 0 gathers all local topologies, merges them, and publishes the
    // result under the global key.
    TF_ASSIGN_OR_RETURN(std::vector<LocalTopologyProto> local_topologies,
                        GetAllLocalTopologies(platform, num_nodes, kv_store,
                                              get_local_topology_timeout));
    *global_topology =
        BuildGlobalTopology(absl::Span<LocalTopologyProto>(local_topologies),
                            assign_global_device_ids);
    TF_RETURN_IF_ERROR(kv_store->Set(global_topology_key,
                                     global_topology->SerializeAsString()));
  } else {
    // Other nodes block until node 0 has published the global topology.
    TF_ASSIGN_OR_RETURN(
        std::string global_topology_str,
        kv_store->Get(global_topology_key, get_global_topology_timeout));
    // NOTE(review): ParseFromString's return value is ignored; a corrupt
    // value would silently yield an empty topology.
    global_topology->ParseFromString(global_topology_str);
  }
  VLOG(3) << "Global topology for platform " << platform << ":\n"
          << global_topology->DebugString();
  return absl::OkStatus();
}
bool IsGpuTopologySymmetric(
const std::map<int, std::set<int>>& slice_id_to_node_ids,
const std::map<int, int>& node_id_to_device_count) {
CHECK(!slice_id_to_node_ids.empty());
CHECK(!node_id_to_device_count.empty());
int num_hosts_per_slice = slice_id_to_node_ids.begin()->second.size();
int num_devices_per_host = node_id_to_device_count.begin()->second;
for (const auto& [slice_id, node_ids] : slice_id_to_node_ids) {
if (node_ids.size() != num_hosts_per_slice) {
LOG(INFO) << "GpuTopology is asymmetric as it has different number "
"of hosts per slice.";
return false;
}
}
for (const auto& [node_id, device_count] : node_id_to_device_count) {
if (device_count != num_devices_per_host) {
LOG(INFO) << "GpuTopology is asymmetric as it has different number "
"of devices per host.";
return false;
}
}
return true;
}
// Summarizes a global topology into a GpuTopologyProto: the sorted list of
// global device ids plus, for symmetric topologies, the compact
// (num_slices, hosts-per-slice, devices-per-host) description. Asymmetric
// topologies get -1 sentinels for those three fields.
absl::StatusOr<GpuTopologyProto> BuildGpuTopology(
    const GlobalTopologyProto& global_topology) {
  GpuTopologyProto gpu_topology;
  // slice index -> ids of the hosts (nodes) in that slice.
  std::map<int, std::set<int>> slice_id_to_node_ids;
  // node id -> number of devices on that host.
  std::map<int, int> node_id_to_device_count;
  std::vector<int> device_ids;
  for (int i = 0; i < global_topology.nodes_size(); ++i) {
    const LocalTopologyProto& local_topology = global_topology.nodes(i);
    node_id_to_device_count[local_topology.node_id()] =
        local_topology.devices_size();
    for (const DeviceProto& device : local_topology.devices()) {
      // The platform version is taken from the first device encountered.
      if (gpu_topology.platform_version().empty()) {
        gpu_topology.set_platform_version(device.name());
      }
      slice_id_to_node_ids[device.slice_index()].insert(
          local_topology.node_id());
      device_ids.push_back(device.global_device_id());
    }
  }
  if (IsGpuTopologySymmetric(slice_id_to_node_ids, node_id_to_device_count)) {
    gpu_topology.set_num_slices(slice_id_to_node_ids.size());
    gpu_topology.set_num_hosts_per_slice(
        slice_id_to_node_ids.begin()->second.size());
    gpu_topology.set_num_devices_per_host(
        node_id_to_device_count.begin()->second);
  } else {
    // Sentinel values signal that the compact description is not valid.
    gpu_topology.set_num_slices(-1);
    gpu_topology.set_num_hosts_per_slice(-1);
    gpu_topology.set_num_devices_per_host(-1);
  }
  // Device ids are stored in sorted order.
  std::sort(device_ids.begin(), device_ids.end());
  gpu_topology.mutable_device_ids()->Add(device_ids.begin(), device_ids.end());
  return gpu_topology;
}
} | #include "xla/pjrt/distributed/topology_util.h"
#include <string>
#include <string_view>
#include <vector>
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/test_helpers.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
// Two hosts with two devices each: BuildGlobalTopology must keep both nodes
// and their per-node device counts while assigning global ids.
TEST(TopologyTest, BuildGlobalTopology) {
  std::vector<LocalTopologyProto> locals(2);
  auto add_device = [](LocalTopologyProto& local, int ordinal) {
    local.add_devices()->set_local_device_ordinal(ordinal);
  };
  add_device(locals[0], 0);
  add_device(locals[0], 0);
  add_device(locals[1], 0);
  add_device(locals[1], 1);
  GlobalTopologyProto global = BuildGlobalTopology(
      absl::Span<LocalTopologyProto>(locals),
      /*assign_global_device_ids=*/true);
  EXPECT_EQ(global.nodes_size(), 2);
  EXPECT_EQ(global.nodes()[0].devices_size(), 2);
  EXPECT_EQ(global.nodes()[1].devices_size(), 2);
}
// Runs ExchangeTopologies concurrently for two "nodes" backed by an
// in-memory KV store and checks that both observe the same merged topology.
TEST(TopologyTest, ExchangeTopology) {
  int num_nodes = 2;
  std::vector<LocalTopologyProto> locals(num_nodes);
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[0].add_devices();
  d1->set_local_device_ordinal(0);
  DeviceProto* d2 = locals[1].add_devices();
  d2->set_local_device_ordinal(0);
  DeviceProto* d3 = locals[1].add_devices();
  d3->set_local_device_ordinal(1);
  InMemoryKeyValueStore kv_store;
  std::vector<GlobalTopologyProto> globals(num_nodes);
  {
    // The pool is scoped so its destructor joins all scheduled tasks before
    // the results are inspected below.
    tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "TestPool",
                                        num_nodes);
    for (int i = 0; i < num_nodes; i++) {
      thread_pool.Schedule([&, i] {
        TF_ASSERT_OK(ExchangeTopologies(
            "cuda", i, num_nodes,
            absl::Seconds(10),
            absl::Seconds(10), &kv_store, locals[i], &globals[i],
            true));
      });
    }
  }
  for (const GlobalTopologyProto& global : globals) {
    EXPECT_EQ(global.nodes_size(), 2);
    EXPECT_EQ(global.nodes()[0].devices_size(), 2);
    EXPECT_EQ(global.nodes()[1].devices_size(), 2);
  }
}
// Two single-host slices (distinct boot ids) with two devices each: the
// topology is symmetric, so the compact (slices, hosts/slice, devices/host)
// description must be populated.
TEST(TopologyTest, BuildGpuTopology) {
  std::vector<LocalTopologyProto> locals(2);
  locals[0].set_boot_id("foo");
  locals[1].set_boot_id("bar");
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  auto add_device = [](LocalTopologyProto& local, int ordinal) {
    local.add_devices()->set_local_device_ordinal(ordinal);
  };
  add_device(locals[0], 0);
  add_device(locals[0], 1);
  add_device(locals[1], 0);
  add_device(locals[1], 1);
  GlobalTopologyProto global = BuildGlobalTopology(
      absl::Span<LocalTopologyProto>(locals),
      /*assign_global_device_ids=*/true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 4);
  EXPECT_EQ(gpu_topology.num_slices(), 2);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), 1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), 2);
}
// Slice "foo" has two hosts while slice "bar" has one: the topology is
// asymmetric, so the compact description fields must be -1 sentinels.
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumHostsPerSlice) {
  std::string slice_0_boot_id = "foo";
  std::string slice_1_boot_id = "bar";
  std::vector<LocalTopologyProto> locals(3);
  locals[0].set_boot_id(slice_0_boot_id);
  locals[1].set_boot_id(slice_0_boot_id);
  locals[2].set_boot_id(slice_1_boot_id);
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  locals[2].set_node_id(2);
  // One device per host.
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[1].add_devices();
  d1->set_local_device_ordinal(0);
  DeviceProto* d2 = locals[2].add_devices();
  d2->set_local_device_ordinal(0);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 3);
  EXPECT_EQ(gpu_topology.num_slices(), -1);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
// Host 0 has two devices while host 1 has one: the topology is asymmetric,
// so the compact description fields must be -1 sentinels.
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumDevicesPerHost) {
  std::string slice_0_boot_id = "foo";
  std::string slice_1_boot_id = "bar";
  std::vector<LocalTopologyProto> locals(2);
  locals[0].set_boot_id(slice_0_boot_id);
  locals[1].set_boot_id(slice_1_boot_id);
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[0].add_devices();
  d1->set_local_device_ordinal(1);
  DeviceProto* d2 = locals[1].add_devices();
  d2->set_local_device_ordinal(0);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 3);
  EXPECT_EQ(gpu_topology.num_slices(), -1);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
}
} | GlobalTopologyProto BuildGlobalTopology(
absl::Span<LocalTopologyProto> local_topologies,
    bool assign_global_device_ids) {
  // Merges per-host topologies into one global topology. Hosts reporting the
  // same boot_id get the same slice index; optionally every device receives
  // a unique, monotonically increasing global device id.
  GlobalTopologyProto global_topology;
  int next_global_device_id = 0;
  int next_slice_index = 0;
  absl::flat_hash_map<std::string, int> boot_id_to_slice_index;
  for (LocalTopologyProto& local : local_topologies) {
    std::string_view boot_id = local.boot_id();
    // The first host seen with a given boot_id allocates the next slice index.
    auto [it, inserted] =
        boot_id_to_slice_index.try_emplace(boot_id, next_slice_index);
    if (inserted) {
      ++next_slice_index;
    }
    for (DeviceProto& device : *local.mutable_devices()) {
      if (assign_global_device_ids) {
        device.set_global_device_id(next_global_device_id++);
      }
      device.set_slice_index(it->second);
    }
    // Swap (not copy) the local proto into the result; `local` is consumed.
    global_topology.add_nodes()->Swap(&local);
  }
  if (VLOG_IS_ON(10)) {
    for (auto it = boot_id_to_slice_index.begin();
         it != boot_id_to_slice_index.end(); ++it) {
      LOG(INFO) << "BuildGlobalTopology boot_id_to_slice_index " << it->first
                << "->" << it->second;
    }
  }
  return global_topology;
}
#include <string>
#include <string_view>
#include <vector>
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/test_helpers.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
// Two hosts with two devices each: BuildGlobalTopology must keep both nodes
// and their per-node device counts while assigning global ids.
TEST(TopologyTest, BuildGlobalTopology) {
  std::vector<LocalTopologyProto> locals(2);
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[0].add_devices();
  d1->set_local_device_ordinal(0);
  DeviceProto* d2 = locals[1].add_devices();
  d2->set_local_device_ordinal(0);
  DeviceProto* d3 = locals[1].add_devices();
  d3->set_local_device_ordinal(1);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  EXPECT_EQ(global.nodes_size(), 2);
  EXPECT_EQ(global.nodes()[0].devices_size(), 2);
  EXPECT_EQ(global.nodes()[1].devices_size(), 2);
}
// Two single-host slices (distinct boot ids) with two devices each: the
// topology is symmetric, so the compact description must be populated.
TEST(TopologyTest, BuildGpuTopology) {
  std::string slice_0_boot_id = "foo";
  std::string slice_1_boot_id = "bar";
  std::vector<LocalTopologyProto> locals(2);
  locals[0].set_boot_id(slice_0_boot_id);
  locals[1].set_boot_id(slice_1_boot_id);
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[0].add_devices();
  d1->set_local_device_ordinal(1);
  DeviceProto* d2 = locals[1].add_devices();
  d2->set_local_device_ordinal(0);
  DeviceProto* d3 = locals[1].add_devices();
  d3->set_local_device_ordinal(1);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 4);
  EXPECT_EQ(gpu_topology.num_slices(), 2);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), 1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), 2);
}
// Slice "foo" has two hosts while slice "bar" has one: asymmetric, so the
// compact description fields must be -1 sentinels.
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumHostsPerSlice) {
  std::string slice_0_boot_id = "foo";
  std::string slice_1_boot_id = "bar";
  std::vector<LocalTopologyProto> locals(3);
  locals[0].set_boot_id(slice_0_boot_id);
  locals[1].set_boot_id(slice_0_boot_id);
  locals[2].set_boot_id(slice_1_boot_id);
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  locals[2].set_node_id(2);
  // One device per host.
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[1].add_devices();
  d1->set_local_device_ordinal(0);
  DeviceProto* d2 = locals[2].add_devices();
  d2->set_local_device_ordinal(0);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 3);
  EXPECT_EQ(gpu_topology.num_slices(), -1);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
// Host 0 has two devices while host 1 has one: asymmetric, so the compact
// description fields must be -1 sentinels.
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumDevicesPerHost) {
  std::string slice_0_boot_id = "foo";
  std::string slice_1_boot_id = "bar";
  std::vector<LocalTopologyProto> locals(2);
  locals[0].set_boot_id(slice_0_boot_id);
  locals[1].set_boot_id(slice_1_boot_id);
  locals[0].set_node_id(0);
  locals[1].set_node_id(1);
  DeviceProto* d0 = locals[0].add_devices();
  d0->set_local_device_ordinal(0);
  DeviceProto* d1 = locals[0].add_devices();
  d1->set_local_device_ordinal(1);
  DeviceProto* d2 = locals[1].add_devices();
  d2->set_local_device_ordinal(0);
  GlobalTopologyProto global =
      BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
                          true);
  TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
  EXPECT_EQ(gpu_topology.device_ids_size(), 3);
  EXPECT_EQ(gpu_topology.num_slices(), -1);
  EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
  EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
#include "xla/service/while_loop_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
using std::nullopt;
using std::optional;
namespace m = match;
// Returns the unique non-constant operand of `instr`. CHECK-fails if there
// is no non-constant operand, or if two distinct ones exist.
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
  const HloInstruction* non_constant = nullptr;
  for (const HloInstruction* operand : instr->operands()) {
    if (operand->IsConstant()) {
      continue;
    }
    if (non_constant != nullptr) {
      CHECK_EQ(non_constant, operand);
    }
    non_constant = operand;
  }
  CHECK_NE(non_constant, nullptr);
  return non_constant;
}
// If every non-constant operand of `instr` is a get-tuple-element (possibly
// behind a kCopy) reading the *same* tuple index of `gte_operand`, returns
// that tuple index; otherwise returns nullopt. Used to discover which
// element of the while-loop state an instruction depends on.
static optional<int64_t> GetGTEOperandIndex(const HloInstruction* instr,
                                            const HloInstruction* gte_operand) {
  VLOG(2) << "GetGTEOperandIndex(" << instr->ToString() << ", "
          << gte_operand->ToString() << ")";
  optional<int64_t> tuple_idx;
  for (const HloInstruction* operand : instr->operands()) {
    // Constant operands are ignored.
    if (Match(operand, m::Constant())) {
      continue;
    }
    // Look through a copy that may sit between the GTE and its user.
    auto possibly_gte_operand = operand;
    if (operand->opcode() == HloOpcode::kCopy) {
      possibly_gte_operand = operand->operand(0);
    }
    if (possibly_gte_operand->opcode() != HloOpcode::kGetTupleElement) {
      return nullopt;
    }
    // The GTE must read from `gte_operand` itself.
    if (!Match(possibly_gte_operand,
               m::GetTupleElement(m::Op().Is(gte_operand)))) {
      return nullopt;
    }
    // All GTE operands must agree on a single tuple index.
    int64_t operand_tuple_idx = possibly_gte_operand->tuple_index();
    if (!tuple_idx.has_value()) {
      tuple_idx = operand_tuple_idx;
    } else {
      if (operand_tuple_idx != tuple_idx) {
        return nullopt;
      }
    }
  }
  return tuple_idx;
}
// Finds "auxiliary" induction variables of `while_op`: tuple elements that are
// read via a unique get-tuple-element in the body (an "extraction"), written
// back at the same tuple index by a unique non-constant value (an
// "insertion"), and whose insertion is reachable from the extraction through a
// restricted dependency chain (GTE/add/multiply/divide/subtract with exactly
// one non-constant operand). Returns the extraction GTEs of all such indices.
std::vector<const HloInstruction*> GetAuxiliaryLoopInductionVars(
    const HloInstruction* while_op) {
  std::vector<const HloInstruction*> aux_ind_gte;
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_body = while_op->while_body();
  auto* while_body_param = while_body->parameter_instruction(0);
  VLOG(2) << "Aux Induction Variables for loop:" << while_op->ToShortString();
  VLOG(2) << "the parameter instr:" << while_body_param->ToShortString();
  VLOG(2) << "the parameter user count:" << while_body_param->users().size();
  // NOTE(review): this null check happens after while_body_param has already
  // been dereferenced by the VLOGs above — presumably parameter_instruction(0)
  // never returns null in practice; confirm before relying on this guard.
  if (while_body_param == nullptr) return aux_ind_gte;
  // Map tuple index -> the unique GTE reading that index; a duplicate reader
  // poisons the entry with nullptr.
  std::map<int64_t, const HloInstruction*> extractions;
  for (const HloInstruction* indx_instr : while_body_param->users()) {
    if (indx_instr->opcode() != HloOpcode::kGetTupleElement) {
      continue;
    }
    auto it = extractions.find(indx_instr->tuple_index());
    if (it != extractions.end()) {
      it->second = nullptr;
      VLOG(2) << "two extractions at same index:" << indx_instr->ToString();
    } else {
      extractions.insert(std::make_pair(indx_instr->tuple_index(), indx_instr));
      VLOG(2) << "inserting extraction :" << indx_instr->ToString();
    }
  }
  VLOG(2) << "total extractions size:" << extractions.size() << std::endl;
  if (extractions.empty()) {
    return aux_ind_gte;
  }
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While body root is not a tuple:" << while_body_root->ToString();
    return aux_ind_gte;
  }
  int64_t index = -1;
  // Map tuple index -> the unique non-constant value written back at that
  // index by the body root; duplicates are likewise poisoned with nullptr.
  std::map<int64_t, const HloInstruction*> insertions;
  for (const HloInstruction* operand : while_body_root->operands()) {
    index++;
    if (!operand->IsConstant()) {
      auto it = insertions.find(index);
      if (it != insertions.end()) {
        it->second = nullptr;
        VLOG(2) << "two insertions at same index:" << operand->ToString();
      } else {
        insertions.insert(std::make_pair(index, operand));
        VLOG(2) << "inserting insertions:" << operand->ToString();
      }
    }
  }
  if (insertions.empty()) {
    return aux_ind_gte;
  }
  // Pair up extraction/insertion at each tuple index (skipping pass-through
  // indices where the GTE itself is written straight back).
  std::map<int64_t, std::pair<const HloInstruction*, const HloInstruction*>>
      candidate_pairs;
  for (; index >= 0; --index) {
    const HloInstruction *ext, *inst;
    ext = (extractions.find(index) != extractions.end())
              ? extractions.find(index)->second
              : nullptr;
    inst = (insertions.find(index) != insertions.end())
               ? insertions.find(index)->second
               : nullptr;
    if (ext != nullptr && inst != nullptr) {
      if (ext != inst) {
        candidate_pairs.insert(
            std::make_pair(index, std::make_pair(ext, inst)));
      }
    }
  }
  VLOG(2) << "total candidate pairs:" << candidate_pairs.size() << std::endl;
  // Restricted dependency relation: an instruction depends on its single
  // non-constant operand only for this small set of opcodes. This keeps the
  // reachability query to simple arithmetic update chains.
  const auto add_dependencies = [](const HloInstruction* hlo,
                                   std::vector<HloInstruction*>* inputs) {
    HloInstruction* non_const_operand = nullptr;
    int num_non_constants = 0;
    for (HloInstruction* operand : hlo->operands()) {
      if (!operand->IsConstant()) {
        num_non_constants++;
        non_const_operand = operand;
      }
    }
    if (num_non_constants == 1 &&
        (hlo->opcode() == HloOpcode::kGetTupleElement ||
         hlo->opcode() == HloOpcode::kAdd ||
         hlo->opcode() == HloOpcode::kMultiply ||
         hlo->opcode() == HloOpcode::kDivide ||
         hlo->opcode() == HloOpcode::kSubtract)) {
      inputs->push_back(non_const_operand);
    }
  };
  std::unique_ptr<HloReachabilityMap> hrm =
      HloReachabilityMap::BuildWithRestrictions(
          while_body,
          absl::FunctionRef<void(const HloInstruction* hlo,
                                 std::vector<HloInstruction*>* inputs)>(
              add_dependencies));
  // Keep only pairs where the written-back value is reachable from the GTE
  // through the restricted relation above.
  for (auto candidates : candidate_pairs) {
    VLOG(2) << "are reachable?:" << (candidates.second.first)->ToString()
            << "*************" << (candidates.second.second)->ToString()
            << std::endl;
    if (hrm->IsReachable(candidates.second.first, candidates.second.second)) {
      aux_ind_gte.push_back(candidates.second.first);
      VLOG(2) << "YES";
    } else {
      VLOG(2) << "NO";
    }
  }
  VLOG(2) << "num auxiliary candidates :" << aux_ind_gte.size();
  return aux_ind_gte;
}
// Returns the tuple index of `while_op`'s induction variable, i.e. the index
// that (a) the loop condition reads via GTEs on its parameter, (b) the body
// updates from the same index of its parameter, with both indices agreeing.
// Returns nullopt if no such single index can be identified.
optional<int64_t> GetLoopInductionVarTupleIdx(const HloInstruction* while_op) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  VLOG(2) << "Finding induction variable for loop "
          << while_op->ToShortString();
  // The index the condition computation compares against.
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_param = while_cond->parameter_instruction(0);
  optional<int64_t> indvar_tuple_idx =
      GetGTEOperandIndex(while_cond_root, while_cond_param);
  if (!indvar_tuple_idx) {
    VLOG(2) << "Induction variable not found in loop condition: "
            << while_cond->root_instruction()->ToString();
    return nullopt;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While body's root is not a tuple instruction: "
            << while_body_root->ToString();
    return nullopt;
  }
  // The index the body's update instruction reads from must match the index
  // the condition uses.
  auto* while_body_inc = while_body_root->operand(*indvar_tuple_idx);
  auto* while_body_param = while_body->parameter_instruction(0);
  optional<int64_t> while_body_indvar_tuple_idx =
      GetGTEOperandIndex(while_body_inc, while_body_param);
  if (!while_body_indvar_tuple_idx) {
    VLOG(2)
        << "Induction variable not found in while body increment instruction: "
        << while_body_inc->ToString();
    return nullopt;
  }
  if (while_body_indvar_tuple_idx != indvar_tuple_idx) {
    VLOG(2) << "Tuple index of induction variable does not match between loop "
               "condition ("
            << *indvar_tuple_idx << ") and while body ("
            << *while_body_indvar_tuple_idx << ")";
    return nullopt;
  }
  // The init value must come from a tuple so callers can index into it.
  auto* while_init = while_op->operand(0);
  if (while_init->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While init expected to be a tuple: " << while_init->ToString();
    return nullopt;
  }
  VLOG(2) << "Induction variable's tuple index: " << *indvar_tuple_idx;
  return indvar_tuple_idx;
}
// Computes a + b, returning std::nullopt if the mathematical sum does not fit
// in int64_t. Overflow is detected *before* the addition by comparing against
// the representable range, so the signed add below can never overflow (which
// would be undefined behavior); this also removes the reliance on the
// bit_cast/unsigned-wraparound trick of the previous implementation.
std::optional<int64_t> CheckedAdd(int64_t a, int64_t b) {
  // When b > 0 the sum can only overflow upward; when b < 0 only downward.
  // The subtractions on the right-hand sides cannot themselves overflow.
  if (b > 0 && a > INT64_MAX - b) return std::nullopt;
  if (b < 0 && a < INT64_MIN - b) return std::nullopt;
  return a + b;
}
// Computes a - b, returning std::nullopt if the mathematical difference does
// not fit in int64_t. Like CheckedAdd, the range check happens before the
// subtraction so no undefined signed overflow can occur, and no
// bit_cast/unsigned-wraparound trick is needed.
std::optional<int64_t> CheckedSubtract(int64_t a, int64_t b) {
  // a - b overflows upward only when b < 0 (a + |b| too large) and downward
  // only when b > 0. INT64_MAX + b (b < 0) and INT64_MIN + b (b > 0) are both
  // representable, so the guards themselves cannot overflow. Note this also
  // correctly rejects 0 - INT64_MIN, which is not representable.
  if (b < 0 && a > INT64_MAX + b) return std::nullopt;
  if (b > 0 && a < INT64_MIN + b) return std::nullopt;
  return a - b;
}
// Pattern-matches `while_op` against the canonical counted-loop shape
//   i = indvar_init; while (i < N  or  i <= N) { ...; i += step; }
// with a constant positive integer `step` and constant bound N, and returns
// the trip count if the match succeeds; nullopt otherwise. `indvar_tuple_idx`
// identifies the induction variable within the loop-carried tuple.
optional<int64_t> MatchTrivialLoopTripCount(const HloInstruction* while_op,
                                            int64_t indvar_tuple_idx,
                                            const Literal& indvar_init) {
  optional<int64_t> indvar_init_val =
      LiteralUtil::LiteralAsScalarInt64(indvar_init);
  if (!indvar_init_val) {
    VLOG(2) << "Pattern-match failed: induction variable init is not a "
               "constant scalar representable as an int64_t: "
            << indvar_init.ToString();
    return nullopt;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_indvar_update =
      while_body->root_instruction()->mutable_operand(indvar_tuple_idx);
  auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
  HloInstruction* trip_count_increase_step_instr = nullptr;
  int64_t trip_count_step = 0;
  // Require the update to be add(indvar, step). On a partial match the
  // step operand may still have been captured; in that case accept it if it
  // is a constant effective-scalar integer.
  if (!Match(while_body_indvar_update,
             m::AddAnyOrder(m::Op().Is(while_body_indvar),
                            m::Op(&trip_count_increase_step_instr)))) {
    if (trip_count_increase_step_instr == nullptr) {
      VLOG(2) << "Pattern-match failed: induction variable is not getting "
                 "updated by an add operation: "
              << while_body_indvar_update->ToString();
      return nullopt;
    }
    if (!trip_count_increase_step_instr->IsConstant() ||
        !ShapeUtil::IsEffectiveScalar(
            trip_count_increase_step_instr->shape())) {
      VLOG(2) << "Pattern-match failed: induction variable is not getting "
                 "incremented by constant: "
              << while_body_indvar_update->ToString();
      return nullopt;
    }
    if (!LiteralUtil::LiteralAsScalarInt64(
             trip_count_increase_step_instr->literal())
             .has_value()) {
      VLOG(2)
          << "Pattern-match failed: trip count step is not an integral type: "
          << trip_count_increase_step_instr->shape().ToString();
      return nullopt;
    }
    // NOTE(review): execution deliberately falls through here — the step was
    // validated as a constant scalar int even though the full add pattern
    // did not match. Confirm this fall-through (rather than returning
    // nullopt) is intended.
    VLOG(2) << "Pattern-match for trip count step failed: "
            << trip_count_increase_step_instr->ToString();
  }
  trip_count_step = LiteralUtil::LiteralAsScalarInt64(
                        trip_count_increase_step_instr->literal())
                        .value();
  if (trip_count_step <= 0) {
    VLOG(2) << "Pattern-match failed: trip count step is not a natural number: "
            << trip_count_step;
    return nullopt;
  }
  // The condition must be a binary comparison between the induction variable
  // and a constant effective-scalar bound.
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_indvar = NonConstantOperand(while_cond_root);
  HloInstruction* while_cond_bound = nullptr;
  if (!Match(while_cond_root,
             m::Op().WithBinaryOperandsAnyOrder(
                 m::Op().Is(while_cond_indvar),
                 m::ConstantEffectiveScalar(&while_cond_bound)))) {
    VLOG(2) << "Pattern-match failed: while condition is not of the form "
               "op(i, N) or op(N, i).";
    return nullopt;
  }
  optional<int64_t> while_cond_bound_val =
      LiteralUtil::LiteralAsScalarInt64(while_cond_bound->literal());
  if (!while_cond_bound_val) {
    VLOG(2) << "Pattern-match failed: while condition induction variable is "
               "not a constant scalar representable as an int64_t.";
    return nullopt;
  }
  // Case i < N: trips = ceil((N - init) / step), clamped at 0.
  if (Match(while_cond_root,
            m::Op()
                .WithComparisonDirection(ComparisonDirection::kLt)
                .WithOperand(0, m::Op().Is(while_cond_indvar)))) {
    VLOG(2) << "Pattern-match succeeded: loop condition is i < N: "
            << while_cond_root->ToString();
    optional<int64_t> trips =
        CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
    if (trips) {
      // NOTE(review): std::remainder/std::floor operate in floating point
      // here; the remainder==0 test detects exact divisibility by step.
      const int64_t remainder = std::remainder(*trips, trip_count_step);
      const int64_t div = std::floor(*trips / trip_count_step);
      if (remainder == 0) {
        return std::max(int64_t{0}, div);
      }
      trips = CheckedAdd(div, 1);
      if (!trips) {
        VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
        return nullopt;
      }
      if (*trips < *while_cond_bound_val) {
        return std::max(int64_t{0}, *trips);
      }
      return std::max(int64_t{0}, div);
    }
    VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
    return nullopt;
  }
  // Case i <= N: trips = floor((N - init) / step) + 1, clamped at 0.
  if (Match(while_cond_root,
            m::Op()
                .WithComparisonDirection(ComparisonDirection::kLe)
                .WithOperand(0, m::Op().Is(while_cond_indvar)))) {
    VLOG(2) << "Pattern-match succeeded: loop condition is i <= N: "
            << while_cond_root->ToString();
    optional<int64_t> trips =
        CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
    if (!trips) {
      VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
      return nullopt;
    }
    trips = CheckedAdd(std::floor(*trips / trip_count_step), 1);
    if (!trips) {
      VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
      return nullopt;
    }
    return std::max<int64_t>(0, *trips);
  }
  VLOG(2) << "Pattern-match failed: while condition follows unknown pattern: "
          << while_cond_root->ToString();
  return nullopt;
}
// Computes the static trip count of `while_op`, or nullopt if it cannot be
// determined. First tries the cheap pattern match (MatchTrivialLoopTripCount);
// failing that, brute-forces up to `max_brute_force_iters` iterations by
// evaluating the condition and induction-variable update with HloEvaluator.
optional<int64_t> ComputeWhileLoopTripCount(const HloInstruction* while_op,
                                            int64_t max_brute_force_iters) {
  VLOG(2) << "Getting trip count for loop " << while_op->ToString();
  optional<int64_t> indvar_tuple_idx = GetLoopInductionVarTupleIdx(while_op);
  if (!indvar_tuple_idx) {
    return nullopt;
  }
  HloEvaluator evaluator(0);
  auto* while_init = while_op->operand(0);
  auto* indvar_init = while_init->operand(*indvar_tuple_idx);
  absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
  if (!indvar_init_result.ok()) {
    VLOG(2) << "Couldn't evaluate induction variable init, "
            << indvar_init_result.status() << ", " << indvar_init->ToString();
    return nullopt;
  }
  Literal indvar_iter_val = std::move(indvar_init_result).value();
  // Fast path: canonical counted loop.
  if (auto trip_count = MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx,
                                                  indvar_iter_val)) {
    return trip_count;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_indvar_update =
      while_body->root_instruction()->operand(*indvar_tuple_idx);
  auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
  auto* while_cond = while_op->while_condition();
  auto* while_cond_root = while_cond->root_instruction();
  auto* while_cond_indvar = NonConstantOperand(while_cond_root);
  // Slow path: simulate the induction variable iteration by iteration.
  for (int64_t trip_count = 0; trip_count != max_brute_force_iters + 1;
       ++trip_count) {
    absl::StatusOr<Literal> result = evaluator.EvaluateWithSubstitutions(
        while_cond_root, {{while_cond_indvar, &indvar_iter_val}});
    if (!result.ok()) {
      VLOG(2) << "Couldn't evaluate while cond: " << result.status();
      return nullopt;
    }
    // Condition evaluated to false => loop exits after `trip_count` trips.
    if (result.value().data<bool>() == absl::Span<const bool>{false}) {
      VLOG(2) << "Loop has static trip count of " << trip_count;
      return trip_count;
    }
    absl::StatusOr<Literal> indvar_next_result =
        evaluator.EvaluateWithSubstitutions(
            while_body_indvar_update, {{while_body_indvar, &indvar_iter_val}});
    if (!indvar_next_result.ok()) {
      VLOG(2) << "Couldn't evaluate induction variable update: "
              << indvar_next_result.status();
      return nullopt;
    }
    indvar_iter_val = std::move(indvar_next_result).value();
  }
  VLOG(2) << "Loop has unknown trip count.";
  return nullopt;
}
// If `inst` has exactly one user and that user is a get-tuple-element,
// returns the user; otherwise returns nullptr.
static HloInstruction* GetOnlyGTE(HloInstruction* inst) {
  if (inst->user_count() != 1) {
    return nullptr;
  }
  HloInstruction* sole_user = inst->users().back();
  return sole_user->opcode() == HloOpcode::kGetTupleElement ? sole_user
                                                            : nullptr;
}
// Returns an upper bound on `while_op`'s trip count, or nullopt if none is
// known. Beyond the exact trip count, the only extra pattern recognized is a
// body that sets the condition's tested tuple element to a constant: if the
// condition evaluates to false on that constant, the loop can run at most once.
optional<int64_t> ComputeWhileLoopTripCountUpperBound(
    const HloInstruction* while_op) {
  auto exact_trip_count = ComputeWhileLoopTripCount(while_op);
  if (exact_trip_count) {
    VLOG(2) << "Loop has exact trip count.";
    return exact_trip_count;
  }
  // The condition must consume its parameter through a single GTE so we know
  // which tuple element it tests.
  auto* while_cond = while_op->while_condition();
  auto* while_cond_param = while_cond->parameter_instruction(0);
  auto* cond_gte = GetOnlyGTE(while_cond_param);
  if (!cond_gte) {
    VLOG(2) << "Induction variable not found in loop condition: "
            << while_cond->root_instruction()->ToString();
    return nullopt;
  }
  auto* while_body = while_op->while_body();
  auto* while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(3) << "While body's root is not a tuple instruction: "
            << while_body_root->ToString();
    return nullopt;
  }
  // The body must overwrite the tested element with a constant.
  int64_t indvar_index = cond_gte->tuple_index();
  auto* while_body_indvar = while_body_root->operand(indvar_index);
  if (while_body_indvar->opcode() != HloOpcode::kConstant) {
    VLOG(3) << "While body does not set the IV to a constant: "
            << while_body_indvar->ToString();
    return nullopt;
  }
  // Clone the condition into a throwaway module, replacing the parameter with
  // a single-element tuple so we can feed the constant directly.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  auto new_param = HloInstruction::CreateParameter(
      0, ShapeUtil::MakeTupleShape({cond_gte->shape()}), "temp");
  replacements[cond_gte] =
      HloInstruction::CreateGetTupleElement(new_param.get(), 0);
  replacements[while_cond_param] = std::move(new_param);
  auto new_module = std::make_unique<HloModule>("temp_mod", HloModuleConfig{});
  auto* new_computation = new_module->AddEmbeddedComputation(
      while_cond->CloneWithReplacements(&replacements));
  HloEvaluator evaluator(0);
  Literal fake_input = Literal::CreateFromShape(
      new_computation->parameter_instruction(0)->shape());
  TF_CHECK_OK(fake_input.CopyFrom(while_body_indvar->literal(),
                                  {0},
                                  {}));
  absl::StatusOr<Literal> eval_result =
      evaluator.Evaluate(*new_computation, {std::move(fake_input)});
  if (!eval_result.ok()) {
    VLOG(2) << "Couldn't evaluate while loop condition.";
    return nullopt;
  }
  Literal cond_result_pred = std::move(eval_result.value());
  CHECK(Shape::Equal().IgnoreLayout()(cond_result_pred.shape(),
                                      ShapeUtil::MakeShape(PRED, {})));
  // If the condition is false on the constant, iteration 2 can never start.
  bool cond_returns_true = cond_result_pred.GetFirstElement<bool>();
  if (!cond_returns_true) {
    VLOG(2) << "Upper bound on the trip count is 1";
    return 1;
  }
  VLOG(2) << "Loop has no known upper bound on the trip count.";
  return nullopt;
}
} | #include "xla/service/while_loop_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Fixture for while_loop_analysis tests. The helper builds a canonical
// counted while loop from (init, limit, step, comparison) and returns the
// trip count reported by MatchTrivialLoopTripCount.
class WhileLoopAnalysisTest : public HloTestBase {
 protected:
  [[nodiscard]] absl::StatusOr<int64_t> MakeWhileLoopAndGetTripCount(
      int init, int limit, int step, ComparisonDirection dir);
};
// Instantiates the HLO template below with the given init/limit/step and
// comparison direction, parses it, and runs MatchTrivialLoopTripCount on the
// resulting while op (induction variable at tuple index 1).
absl::StatusOr<int64_t> WhileLoopAnalysisTest::MakeWhileLoopAndGetTripCount(
    int init, int limit, int step, ComparisonDirection dir) {
  std::string hlo_string_template = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val = f32[2] get-tuple-element(p_body), index=0
    index = s32[] get-tuple-element(p_body), index=1
    one = s32[] constant({{STEP}})
    inc = s32[] add(index, one)
    ROOT root = (f32[2], s32[]) tuple(val, inc)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant({{LIMIT}})
    ROOT result = pred[] compare(gte, const), direction={{COMP_DIR}}
  }
  ENTRY entry {
    param.0 = f32[2] parameter(0)
    param.1 = s32[] constant({{INIT}})
    while_init = (f32[2], s32[]) tuple(param.0, param.1)
    ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
  }
  )";
  // Substitute the template placeholders with concrete values.
  std::string hlo_string =
      absl::StrReplaceAll(hlo_string_template,
                          {{"{{INIT}}", absl::StrCat(init)},
                           {"{{LIMIT}}", absl::StrCat(limit)},
                           {"{{STEP}}", absl::StrCat(step)},
                           {"{{COMP_DIR}}", ComparisonDirectionToString(dir)}});
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
                      ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  // Feed the init constant's literal as the induction-variable init.
  std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
      while_op, 1,
      Cast<HloConstantInstruction>(
          module->GetComputationWithName("entry")->GetInstructionWithName(
              "param.1"))
          ->literal());
  CHECK(trip_count.has_value());
  return *trip_count;
}
// The body overwrites the tested tuple element with constant -1, and the
// condition (== 42) is false on -1, so the upper bound must be 1.
TEST_F(WhileLoopAnalysisTest, SingleIterationUpperBound) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val = f32[2] get-tuple-element(p_body), index=0
      const = s32[] constant(-1)
      ROOT root = (f32[2], s32[]) tuple(val, const)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 1);
}
// The body sets the tested element to 42 and the condition (== 42) is true on
// 42, so no upper bound can be derived.
TEST_F(WhileLoopAnalysisTest, NoUpperBound) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val = f32[2] get-tuple-element(p_body), index=0
      const = s32[] constant(42)
      ROOT root = (f32[2], s32[]) tuple(val, const)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  EXPECT_EQ(ComputeWhileLoopTripCountUpperBound(while_op), std::nullopt);
}
// Reference trip-count: literally runs `for (i = init; i </<= limit; i += step)`
// and counts iterations. Any comparison direction other than kLt/kLe is a
// fatal error.
int CalculateTripCount(int init, int limit, int step, ComparisonDirection dir) {
  if (dir != ComparisonDirection::kLt && dir != ComparisonDirection::kLe) {
    LOG(FATAL) << "Unknown comparison direction: "
               << ComparisonDirectionToString(dir);
  }
  const bool inclusive = (dir == ComparisonDirection::kLe);
  int count = 0;
  int i = init;
  while (inclusive ? i <= limit : i < limit) {
    ++count;
    i += step;
  }
  return count;
}
// Cross-checks MatchTrivialLoopTripCount against the reference
// CalculateTripCount for several (limit, step, direction) combinations,
// including steps that do and do not divide the range evenly.
TEST_F(WhileLoopAnalysisTest, ExactBoundTrivialTripCount) {
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 1, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 2, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 42, 5, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLt).value(),
      CalculateTripCount(0, 40, 5, ComparisonDirection::kLt));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 1, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 2, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 42, 5, ComparisonDirection::kLe));
  EXPECT_EQ(
      MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLe).value(),
      CalculateTripCount(0, 40, 5, ComparisonDirection::kLe));
}
// The update chain add/sub mixes two non-constant operands, so no auxiliary
// induction variable should be reported.
TEST_F(WhileLoopAnalysisTest, NoAIVNoConstChain) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[], s32[]) parameter(0)
      val1 = f32[2] get-tuple-element(p_body), index=0
      val2 = s32[] get-tuple-element(p_body), index=1
      val3 = s32[] get-tuple-element(p_body), index=2
      add = s32[] add(val2, val3)
      sub = s32[] subtract(add, val3)
      ROOT root = (f32[2], s32[], s32[]) tuple(val1, add, sub)
    }
    condition {
      p_cond = (f32[2], s32[], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      param.2 = s32[] parameter(2)
      while_init = (f32[2], s32[], s32[]) tuple(param.0, param.1, param.2)
      ROOT while = (f32[2], s32[], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 0);
}
// A chain of add/subtract/multiply by constants from the GTE at index 1 back
// to the written-back value qualifies that index as an auxiliary IV.
TEST_F(WhileLoopAnalysisTest, AIVMultiChain) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val1 = f32[2] get-tuple-element(p_body), index=0
      val2 = s32[] get-tuple-element(p_body), index=1
      const.1 = s32[] constant(42)
      const.2 = s32[] constant(42)
      const.3 = s32[] constant(42)
      add = s32[] add(val2, const.1)
      sub = s32[] subtract(add, const.2)
      mul = s32[] multiply(sub, const.3)
      ROOT root = (f32[2], s32[]) tuple(val1, mul)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 1);
  EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
// add(val2, val2) and divide(mul, add) each have more than one non-constant
// operand, which breaks the restricted dependency chain: no auxiliary IV.
TEST_F(WhileLoopAnalysisTest, NoAIV) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val1 = f32[2] get-tuple-element(p_body), index=0
      val2 = s32[] get-tuple-element(p_body), index=1
      add = s32[] add(val2, val2)
      const.1 = s32[] constant(42)
      mul = s32[] multiply(add, const.1)
      div = s32[] divide(mul, add)
      ROOT root = (f32[2], s32[]) tuple(val1, div)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 0);
}
// Single add-by-constant from the GTE at index 1 to the written-back value:
// index 1 is reported as an auxiliary induction variable.
TEST_F(WhileLoopAnalysisTest, AIVNoChain) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val1 = f32[2] get-tuple-element(p_body), index=0
      val2 = s32[] get-tuple-element(p_body), index=1
      const = s32[] constant(42)
      add = s32[] add(val2, const)
      ROOT root = (f32[2], s32[]) tuple(val1, add)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 1);
  EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
}
} | static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
} | TEST_F(WhileLoopAnalysisTest, AIVMultiChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const.1 = s32[] constant(42)
const.2 = s32[] constant(42)
const.3 = s32[] constant(42)
add = s32[] add(val2, const.1)
sub = s32[] subtract(add, const.2)
mul = s32[] multiply(sub, const.3)
ROOT root = (f32[2], s32[]) tuple(val1, mul)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 1);
EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
// NOTE(review): this TEST_F is a verbatim duplicate of an earlier AIVNoChain
// test in this file (apparent concatenation artifact) — gtest will fail on
// duplicate test names if both are compiled together; confirm and deduplicate.
// Single add-by-constant from the GTE at index 1 to the written-back value:
// index 1 is reported as an auxiliary induction variable.
TEST_F(WhileLoopAnalysisTest, AIVNoChain) {
  const char* const kHloModule = R"(
    HloModule ModuleWithWhile
    body {
      p_body = (f32[2], s32[]) parameter(0)
      val1 = f32[2] get-tuple-element(p_body), index=0
      val2 = s32[] get-tuple-element(p_body), index=1
      const = s32[] constant(42)
      add = s32[] add(val2, const)
      ROOT root = (f32[2], s32[]) tuple(val1, add)
    }
    condition {
      p_cond = (f32[2], s32[]) parameter(0)
      gte = s32[] get-tuple-element(p_cond), index=1
      const = s32[] constant(42)
      ROOT result = pred[] compare(gte, const), direction=EQ
    }
    ENTRY entry {
      param.0 = f32[2] parameter(0)
      param.1 = s32[] parameter(1)
      while_init = (f32[2], s32[]) tuple(param.0, param.1)
      ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloModule));
  HloInstruction* while_op = module->entry_computation()->root_instruction();
  std::vector<const HloInstruction*> aux_indices =
      GetAuxiliaryLoopInductionVars(while_op);
  EXPECT_EQ(aux_indices.size(), 1);
  EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
#ifndef ABSL_RANDOM_DISTRIBUTIONS_H_
#define ABSL_RANDOM_DISTRIBUTIONS_H_
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/internal/inline_variable.h"
#include "absl/meta/type_traits.h"
#include "absl/random/bernoulli_distribution.h"
#include "absl/random/beta_distribution.h"
#include "absl/random/exponential_distribution.h"
#include "absl/random/gaussian_distribution.h"
#include "absl/random/internal/distribution_caller.h"
#include "absl/random/internal/traits.h"
#include "absl/random/internal/uniform_helper.h"
#include "absl/random/log_uniform_int_distribution.h"
#include "absl/random/poisson_distribution.h"
#include "absl/random/uniform_int_distribution.h"
#include "absl/random/uniform_real_distribution.h"
#include "absl/random/zipf_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Interval tag values passed as the first argument of absl::Uniform to select
// whether each bound is open or closed, e.g. IntervalClosedOpen => [lo, hi).
// IntervalClosed and IntervalOpen are shorthand aliases for the
// closed-closed and open-open tags respectively.
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosedClosed,
                               {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosed, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedOpenTag, IntervalClosedOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpenOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenClosedTag, IntervalOpenClosed, {});
// absl::Uniform<R>(tag, bitgen, lo, hi)
//
// Samples a value of explicit type R from the interval of [lo, hi] whose
// open/closed-ness is chosen by `tag`. Returns `lo` unchanged when the
// normalized bounds do not form a valid range.
template <typename R = void, typename TagType, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(TagType tag,
        URBG&& urbg,
        R lo, R hi) {
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = random_internal::UniformDistributionWrapper<R>;
  // a/b are the tag-normalized bounds, used only to validate the range; the
  // original lo/hi (plus the tag) are what get forwarded to the distribution.
  auto a = random_internal::uniform_lower_bound(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, tag, lo, hi);
}
// absl::Uniform<R>(bitgen, lo, hi)
//
// Tag-less overload: samples R from the default half-open interval [lo, hi).
// Returns `lo` unchanged when the normalized bounds do not form a valid range.
template <typename R = void, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(URBG&& urbg,
        R lo, R hi) {
  using gen_t = absl::decay_t<URBG>;
  using distribution_t = random_internal::UniformDistributionWrapper<R>;
  constexpr auto tag = absl::IntervalClosedOpen;
  // Bounds a/b are computed only for validation; closed-open is the
  // distribution's default interval, so no tag is forwarded.
  auto a = random_internal::uniform_lower_bound(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, lo, hi);
}
// absl::Uniform(tag, bitgen, lo, hi) with heterogeneous bound types
//
// Participates only when no explicit R is given; the result type is inferred
// from A and B, and both bounds are cast to it before sampling. Returns `lo`
// when the normalized bounds do not form a valid range.
template <typename R = void, typename TagType, typename URBG, typename A,
          typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
                           random_internal::uniform_inferred_return_t<A, B>>
Uniform(TagType tag,
        URBG&& urbg,
        A lo, B hi) {
  using gen_t = absl::decay_t<URBG>;
  using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
  using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
  // Validate the range in the inferred result type.
  auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, tag, static_cast<return_t>(lo),
                      static_cast<return_t>(hi));
}
// absl::Uniform(bitgen, lo, hi) with heterogeneous bound types
//
// Tag-less inferred-result overload: samples from [lo, hi) in the type
// inferred from A and B. Returns `lo` when the normalized bounds do not form
// a valid range.
template <typename R = void, typename URBG, typename A, typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
                           random_internal::uniform_inferred_return_t<A, B>>
Uniform(URBG&& urbg,
        A lo, B hi) {
  using gen_t = absl::decay_t<URBG>;
  using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
  using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
  constexpr auto tag = absl::IntervalClosedOpen;
  // Validate the range in the inferred result type.
  auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
  auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
  if (!random_internal::is_uniform_range_valid(a, b)) return lo;
  return random_internal::DistributionCaller<gen_t>::template Call<
      distribution_t>(&urbg, static_cast<return_t>(lo),
                      static_cast<return_t>(hi));
}
// Overload of absl::Uniform() drawing over the full range of an unsigned
// integer type `R`; every representable value is produced. Restricted to
// unsigned types via the `!is_signed` constraint.
template <typename R, typename URBG>
typename absl::enable_if_t<!std::numeric_limits<R>::is_signed, R>
Uniform(URBG&& urbg) {
  // No bounds or interval tag apply here; route straight through the
  // per-generator DistributionCaller so mock generators still intercept.
  return random_internal::DistributionCaller<absl::decay_t<URBG>>::
      template Call<random_internal::UniformDistributionWrapper<R>>(&urbg);
}
template <typename URBG>
bool Bernoulli(URBG&& urbg,
double p) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = absl::bernoulli_distribution;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, p);
}
template <typename RealType, typename URBG>
RealType Beta(URBG&& urbg,
RealType alpha, RealType beta) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Beta<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::beta_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, alpha, beta);
}
template <typename RealType, typename URBG>
RealType Exponential(URBG&& urbg,
RealType lambda = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Exponential<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::exponential_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lambda);
}
template <typename RealType, typename URBG>
RealType Gaussian(URBG&& urbg,
RealType mean = 0, RealType stddev = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Gaussian<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::gaussian_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean, stddev);
}
template <typename IntType, typename URBG>
IntType LogUniform(URBG&& urbg,
IntType lo, IntType hi, IntType base = 2) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::LogUniform<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::log_uniform_int_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lo, hi, base);
}
template <typename IntType, typename URBG>
IntType Poisson(URBG&& urbg,
double mean = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Poisson<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::poisson_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean);
}
template <typename IntType, typename URBG>
IntType Zipf(URBG&& urbg,
IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
double v = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Zipf<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::zipf_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, hi, q, v);
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/distributions.h"
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/random.h"
namespace {
// Number of samples drawn by the moment-based distribution tests below.
constexpr int kSize = 400000;
class RandomDistributionsTest : public testing::Test {};
// Sentinel type returned by the `...` fallback overloads below to signal
// that a given absl::Uniform() call does not compile.
struct Invalid {};
// SFINAE probe: resolves to the return type of the untagged absl::Uniform()
// call for bound types A/B, or to `Invalid` if that call is ill-formed.
template <typename A, typename B>
auto InferredUniformReturnT(int)
    -> decltype(absl::Uniform(std::declval<absl::InsecureBitGen&>(),
                              std::declval<A>(), std::declval<B>()));
template <typename, typename>
Invalid InferredUniformReturnT(...);
// Same probe for the tagged absl::Uniform() overload.
template <typename TagType, typename A, typename B>
auto InferredTaggedUniformReturnT(int)
    -> decltype(absl::Uniform(std::declval<TagType>(),
                              std::declval<absl::InsecureBitGen&>(),
                              std::declval<A>(), std::declval<B>()));
template <typename, typename, typename>
Invalid InferredTaggedUniformReturnT(...);
// Asserts (at compile time) that Uniform(gen, A, B) and Uniform(gen, B, A)
// both infer return type `Expect`, for both the untagged and tagged forms.
template <typename A, typename B, typename Expect>
void CheckArgsInferType() {
  static_assert(
      absl::conjunction<
          std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>,
          std::is_same<Expect,
                       decltype(InferredUniformReturnT<B, A>(0))>>::value,
      "");
  static_assert(
      absl::conjunction<
          std::is_same<Expect, decltype(InferredTaggedUniformReturnT<
                                        absl::IntervalOpenOpenTag, A, B>(0))>,
          std::is_same<Expect,
                       decltype(InferredTaggedUniformReturnT<
                                absl::IntervalOpenOpenTag, B, A>(0))>>::value,
      "");
}
// Probes for the explicit-return-type form absl::Uniform<ExplicitRet>(...).
template <typename A, typename B, typename ExplicitRet>
auto ExplicitUniformReturnT(int) -> decltype(absl::Uniform<ExplicitRet>(
    std::declval<absl::InsecureBitGen&>(),
    std::declval<A>(), std::declval<B>()));
template <typename, typename, typename ExplicitRet>
Invalid ExplicitUniformReturnT(...);
template <typename TagType, typename A, typename B, typename ExplicitRet>
auto ExplicitTaggedUniformReturnT(int)
    -> decltype(absl::Uniform<ExplicitRet>(
        std::declval<TagType>(), std::declval<absl::InsecureBitGen&>(),
        std::declval<A>(), std::declval<B>()));
template <typename, typename, typename, typename ExplicitRet>
Invalid ExplicitTaggedUniformReturnT(...);
// Asserts that an explicitly requested return type `Expect` is honored for
// both argument orders and both the untagged and tagged forms.
template <typename A, typename B, typename Expect>
void CheckArgsReturnExpectedType() {
  static_assert(
      absl::conjunction<
          std::is_same<Expect,
                       decltype(ExplicitUniformReturnT<A, B, Expect>(0))>,
          std::is_same<Expect, decltype(ExplicitUniformReturnT<B, A, Expect>(
                                   0))>>::value,
      "");
  static_assert(
      absl::conjunction<
          std::is_same<Expect,
                       decltype(ExplicitTaggedUniformReturnT<
                                absl::IntervalOpenOpenTag, A, B, Expect>(0))>,
          std::is_same<Expect, decltype(ExplicitTaggedUniformReturnT<
                                        absl::IntervalOpenOpenTag, B, A,
                                        Expect>(0))>>::value,
      "");
}
// Probe for the no-bounds form absl::Uniform<R>(gen); `Invalid` when R is
// not an allowed (unsigned) type.
template <typename R>
auto UniformNoBoundsReturnT(int)
    -> decltype(absl::Uniform<R>(std::declval<absl::InsecureBitGen&>()));
template <typename>
Invalid UniformNoBoundsReturnT(...);
// Compile-time checks for absl::Uniform() return-type inference across
// homogeneous and mixed bound types, including combinations that must be
// rejected (`Invalid`).
TEST_F(RandomDistributionsTest, UniformTypeInference) {
  // Identical bound types infer that same type.
  CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
  CheckArgsInferType<uint32_t, uint32_t, uint32_t>();
  CheckArgsInferType<uint64_t, uint64_t, uint64_t>();
  CheckArgsInferType<int16_t, int16_t, int16_t>();
  CheckArgsInferType<int32_t, int32_t, int32_t>();
  CheckArgsInferType<int64_t, int64_t, int64_t>();
  CheckArgsInferType<float, float, float>();
  CheckArgsInferType<double, double, double>();
  // An explicit return type overrides inference, including narrowing.
  CheckArgsReturnExpectedType<int16_t, int16_t, int32_t>();
  CheckArgsReturnExpectedType<uint16_t, uint16_t, int32_t>();
  CheckArgsReturnExpectedType<int16_t, int16_t, int64_t>();
  CheckArgsReturnExpectedType<int16_t, int32_t, int64_t>();
  CheckArgsReturnExpectedType<int16_t, int32_t, double>();
  CheckArgsReturnExpectedType<float, float, double>();
  CheckArgsReturnExpectedType<int, int, int16_t>();
  // Mixed widths promote to the wider/floating type.
  CheckArgsInferType<uint16_t, uint32_t, uint32_t>();
  CheckArgsInferType<uint16_t, uint64_t, uint64_t>();
  CheckArgsInferType<uint16_t, int32_t, int32_t>();
  CheckArgsInferType<uint16_t, int64_t, int64_t>();
  CheckArgsInferType<uint16_t, float, float>();
  CheckArgsInferType<uint16_t, double, double>();
  CheckArgsInferType<int16_t, int32_t, int32_t>();
  CheckArgsInferType<int16_t, int64_t, int64_t>();
  CheckArgsInferType<int16_t, float, float>();
  CheckArgsInferType<int16_t, double, double>();
  // Signed/unsigned mixes that could lose values are rejected.
  CheckArgsInferType<uint16_t, int16_t, Invalid>();
  CheckArgsInferType<int16_t, uint32_t, Invalid>();
  CheckArgsInferType<int16_t, uint64_t, Invalid>();
  CheckArgsInferType<uint32_t, uint64_t, uint64_t>();
  CheckArgsInferType<uint32_t, int64_t, int64_t>();
  CheckArgsInferType<uint32_t, double, double>();
  CheckArgsInferType<int32_t, int64_t, int64_t>();
  CheckArgsInferType<int32_t, double, double>();
  CheckArgsInferType<uint32_t, int32_t, Invalid>();
  CheckArgsInferType<int32_t, uint64_t, Invalid>();
  // 32/64-bit integers cannot be represented exactly in float (and int64 not
  // even in double), so these combinations are rejected too.
  CheckArgsInferType<int32_t, float, Invalid>();
  CheckArgsInferType<uint32_t, float, Invalid>();
  CheckArgsInferType<uint64_t, int64_t, Invalid>();
  CheckArgsInferType<int64_t, float, Invalid>();
  CheckArgsInferType<int64_t, double, Invalid>();
  CheckArgsInferType<float, double, double>();
}
// Smoke test: representative absl::Uniform() call shapes compile and run.
// The draws are over [0, 1)-style ranges, so none should ever equal 1 for
// the open/closed-open intervals used here.
TEST_F(RandomDistributionsTest, UniformExamples) {
  absl::InsecureBitGen gen;
  EXPECT_NE(1, absl::Uniform(gen, static_cast<uint16_t>(0), 1.0f));
  EXPECT_NE(1, absl::Uniform(gen, 0, 1.0));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen,
                             static_cast<uint16_t>(0), 1.0f));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, 0, 1.0));
  EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, -1, 1.0));
  EXPECT_NE(1, absl::Uniform<double>(absl::IntervalOpenOpen, gen, -1, 1));
  EXPECT_NE(1, absl::Uniform<float>(absl::IntervalOpenOpen, gen, 0, 1));
  EXPECT_NE(1, absl::Uniform<float>(gen, 0, 1));
}
// Verifies the no-bounds absl::Uniform<R>(gen) overload: it must exist for
// unsigned integer types (returning R) and be rejected for signed integer
// and floating-point types (probe yields `Invalid`).
TEST_F(RandomDistributionsTest, UniformNoBounds) {
  absl::InsecureBitGen gen;
  // Runtime smoke test for each allowed unsigned type.
  absl::Uniform<uint8_t>(gen);
  absl::Uniform<uint16_t>(gen);
  absl::Uniform<uint32_t>(gen);
  absl::Uniform<uint64_t>(gen);
  absl::Uniform<absl::uint128>(gen);
  // Compile-time return-type checks.
  testing::StaticAssertTypeEq<uint8_t,
                              decltype(UniformNoBoundsReturnT<uint8_t>(0))>();
  testing::StaticAssertTypeEq<uint16_t,
                              decltype(UniformNoBoundsReturnT<uint16_t>(0))>();
  testing::StaticAssertTypeEq<uint32_t,
                              decltype(UniformNoBoundsReturnT<uint32_t>(0))>();
  testing::StaticAssertTypeEq<uint64_t,
                              decltype(UniformNoBoundsReturnT<uint64_t>(0))>();
  testing::StaticAssertTypeEq<
      absl::uint128, decltype(UniformNoBoundsReturnT<absl::uint128>(0))>();
  // Signed and floating-point result types must not compile.
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int8_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int16_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int32_t>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<int64_t>(0))>();
  testing::StaticAssertTypeEq<
      Invalid, decltype(UniformNoBoundsReturnT<absl::int128>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<float>(0))>();
  testing::StaticAssertTypeEq<Invalid,
                              decltype(UniformNoBoundsReturnT<double>(0))>();
}
// Verifies the documented degenerate-range behavior: when the requested
// interval is empty or inverted, absl::Uniform() returns `lo` unchanged
// rather than sampling from an invalid range.
TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
  // x87 extended precision changes the nextafter-based expectations below.
  GTEST_SKIP()
      << "Skipping the test because we detected x87 floating-point semantics";
#endif
  absl::InsecureBitGen gen;
  // Unsigned: empty and inverted ranges return lo.
  EXPECT_EQ(0, absl::Uniform<uint64_t>(gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<uint64_t>(gen, 1, 0));
  EXPECT_EQ(0, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 1, 0));
  constexpr auto m = (std::numeric_limits<uint64_t>::max)();
  EXPECT_EQ(m, absl::Uniform(gen, m, m));
  EXPECT_EQ(m, absl::Uniform(gen, m, m - 1));
  EXPECT_EQ(m - 1, absl::Uniform(gen, m - 1, m));
  EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m));
  EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m - 1));
  EXPECT_EQ(m - 1, absl::Uniform(absl::IntervalOpenOpen, gen, m - 1, m));
  // Signed: same behavior at zero and at the type's extremes.
  EXPECT_EQ(0, absl::Uniform<int64_t>(gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<int64_t>(gen, 1, 0));
  EXPECT_EQ(0, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 0, 0));
  EXPECT_EQ(1, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 1, 0));
  constexpr auto l = (std::numeric_limits<int64_t>::min)();
  constexpr auto r = (std::numeric_limits<int64_t>::max)();
  EXPECT_EQ(l, absl::Uniform(gen, l, l));
  EXPECT_EQ(r, absl::Uniform(gen, r, r));
  EXPECT_EQ(r, absl::Uniform(gen, r, r - 1));
  EXPECT_EQ(r - 1, absl::Uniform(gen, r - 1, r));
  EXPECT_EQ(l, absl::Uniform(absl::IntervalOpenOpen, gen, l, l));
  EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r));
  EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r - 1));
  EXPECT_EQ(r - 1, absl::Uniform(absl::IntervalOpenOpen, gen, r - 1, r));
  // Floating-point: ranges one ULP wide (or denormal-wide) collapse to a
  // single representable value.
  const double e = std::nextafter(1.0, 2.0);
  const double f = std::nextafter(1.0, 0.0);
  const double g = std::numeric_limits<double>::denorm_min();
  EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, e));
  EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, f));
  EXPECT_EQ(0.0, absl::Uniform(gen, 0.0, g));
  EXPECT_EQ(e, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, e));
  EXPECT_EQ(f, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, f));
  EXPECT_EQ(g, absl::Uniform(absl::IntervalOpenOpen, gen, 0.0, g));
}
// Statistical check: samples from Uniform(gen, 0, 1.0) should match the
// moments of U(0,1) — mean 1/2, variance 1/12, skewness 0, kurtosis 9/5.
TEST_F(RandomDistributionsTest, UniformReal) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Uniform(gen, 0, 1.0);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.5, moments.mean, 0.02);
  EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
  EXPECT_NEAR(0.0, moments.skewness, 0.02);
  EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
// Statistical check for the integer closed-closed overload; samples are
// normalized to [0, 1] so the same U(0,1) moments apply.
TEST_F(RandomDistributionsTest, UniformInt) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    const int64_t kMax = 1000000000000ll;
    int64_t j = absl::Uniform(absl::IntervalClosedClosed, gen, 0, kMax);
    values[i] = static_cast<double>(j) / static_cast<double>(kMax);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.5, moments.mean, 0.02);
  EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
  EXPECT_NEAR(0.0, moments.skewness, 0.02);
  EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
// Statistical check: Exponential(lambda=1) has mean 1, variance 1,
// skewness 2, and kurtosis 9 (only a lower bound is asserted for kurtosis).
TEST_F(RandomDistributionsTest, Exponential) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Exponential<double>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(1.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.025);
  EXPECT_NEAR(2.0, moments.skewness, 0.1);
  EXPECT_LT(5.0, moments.kurtosis);
}
// Statistical check: Poisson with the default mean of 1 has mean 1,
// variance 1, and skewness 1.
TEST_F(RandomDistributionsTest, PoissonDefault) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Poisson<int64_t>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(1.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.02);
  EXPECT_NEAR(1.0, moments.skewness, 0.025);
  EXPECT_LT(2.0, moments.kurtosis);
}
// Statistical check with a very large mean, exercising the large-mean
// sampling path. The skewness tolerance here is deliberately loose.
TEST_F(RandomDistributionsTest, PoissonLarge) {
  constexpr double kMean = 100000000.0;
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Poisson<int64_t>(gen, kMean);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(kMean, moments.mean, kMean * 0.015);
  EXPECT_NEAR(kMean, moments.variance, kMean * 0.015);
  EXPECT_NEAR(std::sqrt(kMean), moments.skewness, kMean * 0.02);
  EXPECT_LT(2.0, moments.kurtosis);
}
// Statistical check: the empirical success rate of Bernoulli(p) should be
// close to p.
TEST_F(RandomDistributionsTest, Bernoulli) {
  constexpr double kP = 0.5151515151;
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Bernoulli(gen, kP);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(kP, moments.mean, 0.01);
}
// Statistical check: Beta(2, 3) has mean alpha / (alpha + beta) = 0.4.
TEST_F(RandomDistributionsTest, Beta) {
  constexpr double kAlpha = 2.0;
  constexpr double kBeta = 3.0;
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Beta(gen, kAlpha, kBeta);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.4, moments.mean, 0.01);
}
// Loose sanity check on the Zipf sample mean (the wide tolerance only
// guards against gross failures, not distribution accuracy).
TEST_F(RandomDistributionsTest, Zipf) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Zipf<int64_t>(gen, 100);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(6.5944, moments.mean, 2000) << moments;
}
// Statistical check: the standard normal has mean 0, variance 1,
// skewness 0, and kurtosis 3.
TEST_F(RandomDistributionsTest, Gaussian) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::Gaussian<double>(gen);
  }
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(0.0, moments.mean, 0.02);
  EXPECT_NEAR(1.0, moments.variance, 0.04);
  EXPECT_NEAR(0, moments.skewness, 0.2);
  EXPECT_NEAR(3.0, moments.kurtosis, 0.5);
}
// Statistical check: with base 2 over [0, 1023] there are 11 equally likely
// buckets; the expected sample mean is the average of the bucket midpoints.
TEST_F(RandomDistributionsTest, LogUniform) {
  std::vector<double> values(kSize);
  absl::InsecureBitGen gen;
  for (int i = 0; i < kSize; i++) {
    values[i] = absl::LogUniform<int64_t>(gen, 0, (1 << 10) - 1);
  }
  // Sum of each bucket's min and max endpoints, divided by 2 * 11 buckets.
  const double mean = (0 + 1 + 1 + 2 + 3 + 4 + 7 + 8 + 15 + 16 + 31 + 32 + 63 +
                       64 + 127 + 128 + 255 + 256 + 511 + 512 + 1023) /
                      (2.0 * 11.0);
  const auto moments =
      absl::random_internal::ComputeDistributionMoments(values);
  EXPECT_NEAR(mean, moments.mean, 2) << moments;
}
} | template <typename R, typename URBG>
typename absl::enable_if_t<!std::numeric_limits<R>::is_signed, R>
Uniform(URBG&& urbg) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = random_internal::UniformDistributionWrapper<R>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg);
} | TEST_F(RandomDistributionsTest, UniformNoBounds) {
absl::InsecureBitGen gen;
absl::Uniform<uint8_t>(gen);
absl::Uniform<uint16_t>(gen);
absl::Uniform<uint32_t>(gen);
absl::Uniform<uint64_t>(gen);
absl::Uniform<absl::uint128>(gen);
testing::StaticAssertTypeEq<uint8_t,
decltype(UniformNoBoundsReturnT<uint8_t>(0))>();
testing::StaticAssertTypeEq<uint16_t,
decltype(UniformNoBoundsReturnT<uint16_t>(0))>();
testing::StaticAssertTypeEq<uint32_t,
decltype(UniformNoBoundsReturnT<uint32_t>(0))>();
testing::StaticAssertTypeEq<uint64_t,
decltype(UniformNoBoundsReturnT<uint64_t>(0))>();
testing::StaticAssertTypeEq<
absl::uint128, decltype(UniformNoBoundsReturnT<absl::uint128>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int8_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int16_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int32_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int64_t>(0))>();
testing::StaticAssertTypeEq<
Invalid, decltype(UniformNoBoundsReturnT<absl::int128>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<float>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<double>(0))>();
} |
#include "tsl/platform/hash.h"
#include <string.h>
#include "tsl/platform/macros.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/types.h"
namespace tsl {
// Widens a raw byte to uint32 without sign extension (char may be signed).
static inline uint32 ByteAs32(char c) {
  return static_cast<uint32>(static_cast<unsigned char>(c));
}
// Widens a raw byte to uint64 without sign extension.
static inline uint64 ByteAs64(char c) {
  return static_cast<uint64>(static_cast<unsigned char>(c));
}
// Computes a 32-bit hash of `data[0, n)` mixed with `seed`.
// The constants and mixing structure match MurmurHash2 (m = 0x5bd1e995,
// r = 24); the exact output values are pinned by golden tests, so the
// statement order below must not change.
uint32 Hash32(const char* data, size_t n, uint32 seed) {
  const uint32 m = 0x5bd1e995;
  const int r = 24;
  // Initialize the hash from the seed and the input length.
  uint32 h = seed ^ n;
  // Mix four bytes at a time into the hash.
  while (n >= 4) {
    uint32 k = core::DecodeFixed32(data);
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
    data += 4;
    n -= 4;
  }
  // Fold in the trailing 1-3 bytes; the cases intentionally fall through.
  switch (n) {
    case 3:
      h ^= ByteAs32(data[2]) << 16;
      TF_FALLTHROUGH_INTENDED;
    case 2:
      h ^= ByteAs32(data[1]) << 8;
      TF_FALLTHROUGH_INTENDED;
    case 1:
      h ^= ByteAs32(data[0]);
      h *= m;
  }
  // Final avalanche so the last few bytes are well mixed.
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}
// Computes a 64-bit hash of `data[0, n)` mixed with `seed`.
// The constants and mixing structure match MurmurHash64A
// (m = 0xc6a4a7935bd1e995, r = 47); output values are pinned by golden
// tests, so the statement order below must not change.
uint64 Hash64(const char* data, size_t n, uint64 seed) {
  const uint64 m = 0xc6a4a7935bd1e995;
  const int r = 47;
  // Initialize the hash from the seed and the (scaled) input length.
  uint64 h = seed ^ (n * m);
  // Mix eight bytes at a time into the hash.
  while (n >= 8) {
    uint64 k = core::DecodeFixed64(data);
    data += 8;
    n -= 8;
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
  }
  // Fold in the trailing 1-7 bytes; the cases intentionally fall through.
  switch (n) {
    case 7:
      h ^= ByteAs64(data[6]) << 48;
      TF_FALLTHROUGH_INTENDED;
    case 6:
      h ^= ByteAs64(data[5]) << 40;
      TF_FALLTHROUGH_INTENDED;
    case 5:
      h ^= ByteAs64(data[4]) << 32;
      TF_FALLTHROUGH_INTENDED;
    case 4:
      h ^= ByteAs64(data[3]) << 24;
      TF_FALLTHROUGH_INTENDED;
    case 3:
      h ^= ByteAs64(data[2]) << 16;
      TF_FALLTHROUGH_INTENDED;
    case 2:
      h ^= ByteAs64(data[1]) << 8;
      TF_FALLTHROUGH_INTENDED;
    case 1:
      h ^= ByteAs64(data[0]);
      h *= m;
  }
  // Final avalanche so the last few bytes are well mixed.
  h ^= h >> r;
  h *= m;
  h ^= h >> r;
  return h;
}
} | #include <map>
#include <unordered_map>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
// Golden-value test: pins Hash32/Hash64 outputs for fixed inputs and seeds,
// guarding against regressions from char signedness or alignment handling.
TEST(Hash, SignedUnsignedIssue) {
  // Inputs use high-bit-set bytes, which would hash differently if bytes
  // were sign-extended anywhere.
  const unsigned char d1[1] = {0x62};
  const unsigned char d2[2] = {0xc3, 0x97};
  const unsigned char d3[3] = {0xe2, 0x99, 0xa5};
  const unsigned char d4[4] = {0xe1, 0x80, 0xb9, 0x32};
  const unsigned char d5[48] = {
      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  };
  struct Case {
    uint32 hash32;   // expected Hash32 output
    uint64 hash64;   // expected Hash64 output
    const unsigned char* data;
    size_t size;
    uint32 seed;
  };
  for (Case c : std::vector<Case>{
           {0x471a8188u, 0x4c61ea3eeda4cb87ull, nullptr, 0, 0xbc9f1d34},
           {0xd615eba5u, 0x091309f7ef916c8aull, d1, sizeof(d1), 0xbc9f1d34},
           {0x0c3cccdau, 0xa815bcdf1d1af01cull, d2, sizeof(d2), 0xbc9f1d34},
           {0x3ba37e0eu, 0x02167564e4d06430ull, d3, sizeof(d3), 0xbc9f1d34},
           {0x16174eb3u, 0x8f7ed82ffc21071full, d4, sizeof(d4), 0xbc9f1d34},
           {0x98b1926cu, 0xce196580c97aff1eull, d5, sizeof(d5), 0x12345678},
       }) {
    EXPECT_EQ(c.hash32,
              Hash32(reinterpret_cast<const char*>(c.data), c.size, c.seed));
    EXPECT_EQ(c.hash64,
              Hash64(reinterpret_cast<const char*>(c.data), c.size, c.seed));
    // The hash must be independent of buffer alignment: shift the payload by
    // 1-7 bytes and expect identical results.
    for (int align = 1; align <= 7; align++) {
      std::string input(align, 'x');
      input.append(reinterpret_cast<const char*>(c.data), c.size);
      EXPECT_EQ(c.hash32, Hash32(&input[align], c.size, c.seed));
      EXPECT_EQ(c.hash64, Hash64(&input[align], c.size, c.seed));
    }
  }
}
// Verifies that hashing a pointer actually mixes its bits rather than
// returning the raw address value.
TEST(Hash, HashPtrIsNotIdentityFunction) {
  int* ptr = reinterpret_cast<int*>(0xcafe0000);
  EXPECT_NE(hash<int*>()(ptr), size_t{0xcafe0000});
}
// Benchmark: Hash32 throughput over input sizes from 1 to 1024 bytes.
static void BM_Hash32(::testing::benchmark::State& state) {
  int len = state.range(0);
  std::string input(len, 'x');
  uint32 h = 0;
  for (auto s : state) {
    h = Hash32(input.data(), len, 1);
  }
  state.SetBytesProcessed(state.iterations() * len);
  // Log the result so the hash computation is not optimized away.
  VLOG(1) << h;
}
BENCHMARK(BM_Hash32)->Range(1, 1024);
// Basic hasher contract: equal inputs hash equal; these four distinct short
// strings happen to hash distinctly (not a general guarantee, but a useful
// smoke test against a degenerate hasher).
TEST(StringPieceHasher, Equality) {
  StringPieceHasher hasher;
  StringPiece s1("foo");
  StringPiece s2("bar");
  StringPiece s3("baz");
  StringPiece s4("zot");
  EXPECT_TRUE(hasher(s1) != hasher(s2));
  EXPECT_TRUE(hasher(s1) != hasher(s3));
  EXPECT_TRUE(hasher(s1) != hasher(s4));
  EXPECT_TRUE(hasher(s2) != hasher(s3));
  EXPECT_TRUE(hasher(s2) != hasher(s4));
  EXPECT_TRUE(hasher(s3) != hasher(s4));
  EXPECT_TRUE(hasher(s1) == hasher(s1));
  EXPECT_TRUE(hasher(s2) == hasher(s2));
  EXPECT_TRUE(hasher(s3) == hasher(s3));
  EXPECT_TRUE(hasher(s4) == hasher(s4));
}
// End-to-end use of StringPieceHasher as an unordered_map hash: insertion,
// iteration, heterogeneous-looking find by literal, and erase.
TEST(StringPieceHasher, HashMap) {
  string s1("foo");
  string s2("bar");
  string s3("baz");
  StringPiece p1(s1);
  StringPiece p2(s2);
  StringPiece p3(s3);
  std::unordered_map<StringPiece, int, StringPieceHasher> map;
  map.insert(std::make_pair(p1, 0));
  map.insert(std::make_pair(p2, 1));
  map.insert(std::make_pair(p3, 2));
  EXPECT_EQ(map.size(), 3);
  // Each value 0..2 must appear exactly once during iteration.
  bool found[3] = {false, false, false};
  for (auto const& val : map) {
    int x = val.second;
    EXPECT_TRUE(x >= 0 && x < 3);
    EXPECT_TRUE(!found[x]);
    found[x] = true;
  }
  EXPECT_EQ(found[0], true);
  EXPECT_EQ(found[1], true);
  EXPECT_EQ(found[2], true);
  // Lookup of an absent key fails; lookup and erase of "bar" succeeds.
  auto new_iter = map.find("zot");
  EXPECT_TRUE(new_iter == map.end());
  new_iter = map.find("bar");
  EXPECT_TRUE(new_iter != map.end());
  map.erase(new_iter);
  EXPECT_EQ(map.size(), 2);
  // After erasing "bar" (value 1), only values 0 and 2 remain.
  found[0] = false;
  found[1] = false;
  found[2] = false;
  for (const auto& iter : map) {
    int x = iter.second;
    EXPECT_TRUE(x >= 0 && x < 3);
    EXPECT_TRUE(!found[x]);
    found[x] = true;
  }
  EXPECT_EQ(found[0], true);
  EXPECT_EQ(found[1], false);
  EXPECT_EQ(found[2], true);
}
} | uint32 Hash32(const char* data, size_t n, uint32 seed) {
const uint32 m = 0x5bd1e995;
const int r = 24;
uint32 h = seed ^ n;
while (n >= 4) {
uint32 k = core::DecodeFixed32(data);
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
n -= 4;
}
switch (n) {
case 3:
h ^= ByteAs32(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs32(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs32(data[0]);
h *= m;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
} | #include <map>
#include <unordered_map>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
TEST(Hash, SignedUnsignedIssue) {
const unsigned char d1[1] = {0x62};
const unsigned char d2[2] = {0xc3, 0x97};
const unsigned char d3[3] = {0xe2, 0x99, 0xa5};
const unsigned char d4[4] = {0xe1, 0x80, 0xb9, 0x32};
const unsigned char d5[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
struct Case {
uint32 hash32;
uint64 hash64;
const unsigned char* data;
size_t size;
uint32 seed;
};
for (Case c : std::vector<Case>{
{0x471a8188u, 0x4c61ea3eeda4cb87ull, nullptr, 0, 0xbc9f1d34},
{0xd615eba5u, 0x091309f7ef916c8aull, d1, sizeof(d1), 0xbc9f1d34},
{0x0c3cccdau, 0xa815bcdf1d1af01cull, d2, sizeof(d2), 0xbc9f1d34},
{0x3ba37e0eu, 0x02167564e4d06430ull, d3, sizeof(d3), 0xbc9f1d34},
{0x16174eb3u, 0x8f7ed82ffc21071full, d4, sizeof(d4), 0xbc9f1d34},
{0x98b1926cu, 0xce196580c97aff1eull, d5, sizeof(d5), 0x12345678},
}) {
EXPECT_EQ(c.hash32,
Hash32(reinterpret_cast<const char*>(c.data), c.size, c.seed));
EXPECT_EQ(c.hash64,
Hash64(reinterpret_cast<const char*>(c.data), c.size, c.seed));
for (int align = 1; align <= 7; align++) {
std::string input(align, 'x');
input.append(reinterpret_cast<const char*>(c.data), c.size);
EXPECT_EQ(c.hash32, Hash32(&input[align], c.size, c.seed));
EXPECT_EQ(c.hash64, Hash64(&input[align], c.size, c.seed));
}
}
} |
#include "quiche/web_transport/web_transport_headers.h"
#include <array>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/common/structured_headers.h"
namespace webtransport {
namespace {
using ::quiche::structured_headers::Dictionary;
using ::quiche::structured_headers::DictionaryMember;
using ::quiche::structured_headers::Item;
using ::quiche::structured_headers::ItemTypeToString;
using ::quiche::structured_headers::List;
using ::quiche::structured_headers::ParameterizedItem;
using ::quiche::structured_headers::ParameterizedMember;
// Returns OK if `item` has the structured-headers type `expected_type`;
// otherwise an InvalidArgumentError naming both the expected and the
// actual type.
absl::Status CheckItemType(const ParameterizedItem& item,
                           Item::ItemType expected_type) {
  if (item.item.Type() == expected_type) {
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError(absl::StrCat(
      "Expected all members to be of type ", ItemTypeToString(expected_type),
      ", found ", ItemTypeToString(item.item.Type()), " instead"));
}
// Returns OK if `member` is a single bare item (not an inner list) of type
// `expected_type`; otherwise an InvalidArgumentError.
absl::Status CheckMemberType(const ParameterizedMember& member,
                             Item::ItemType expected_type) {
  if (member.member_is_inner_list || member.member.size() != 1) {
    // Note the trailing space after "type": without it the message reads
    // e.g. "of typeinteger" (matches the sibling CheckItemType message).
    return absl::InvalidArgumentError(absl::StrCat(
        "Expected all members to be of type ", ItemTypeToString(expected_type),
        ", found a nested list instead"));
  }
  return CheckItemType(member.member[0], expected_type);
}
// Table mapping each WebTransport-Init dictionary key to the corresponding
// pointer-to-member of WebTransportInitHeader; shared by ParseInitHeader
// and SerializeInitHeader.
ABSL_CONST_INIT std::array kInitHeaderFields{
    std::make_pair("u", &WebTransportInitHeader::initial_unidi_limit),
    std::make_pair("bl", &WebTransportInitHeader::initial_incoming_bidi_limit),
    std::make_pair("br", &WebTransportInitHeader::initial_outgoing_bidi_limit),
};
}
// Parses a WebTransport subprotocol request header (an sf-list of tokens)
// into the list of offered subprotocol names. Parameters attached to list
// members are accepted and discarded; non-token members are rejected.
absl::StatusOr<std::vector<std::string>> ParseSubprotocolRequestHeader(
    absl::string_view value) {
  std::optional<List> parsed = quiche::structured_headers::ParseList(value);
  if (!parsed.has_value()) {
    return absl::InvalidArgumentError(
        "Failed to parse the header as an sf-list");
  }
  std::vector<std::string> subprotocols;
  subprotocols.reserve(parsed->size());
  for (ParameterizedMember& entry : *parsed) {
    QUICHE_RETURN_IF_ERROR(CheckMemberType(entry, Item::kTokenType));
    // Move the token string out of the parsed structure.
    subprotocols.push_back(std::move(entry.member[0].item).TakeString());
  }
  return subprotocols;
}
// Serializes a list of subprotocol names into a request header value
// (comma-separated sf-tokens). Every name must be a valid sf-token.
absl::StatusOr<std::string> SerializeSubprotocolRequestHeader(
    absl::Span<const std::string> subprotocols) {
  // Validate all entries up front so no partial output is produced.
  for (const std::string& subprotocol : subprotocols) {
    if (!quiche::structured_headers::IsValidToken(subprotocol)) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid token: ", subprotocol));
    }
  }
  return absl::StrJoin(subprotocols, ", ");
}
// Parses a WebTransport subprotocol response header: a single sf-item that
// must be a token. Returns the selected subprotocol name.
absl::StatusOr<std::string> ParseSubprotocolResponseHeader(
    absl::string_view value) {
  std::optional<ParameterizedItem> item =
      quiche::structured_headers::ParseItem(value);
  if (!item.has_value()) {
    return absl::InvalidArgumentError("Failed to parse sf-item");
  }
  QUICHE_RETURN_IF_ERROR(CheckItemType(*item, Item::kTokenType));
  // Move the token string out of the parsed item.
  return std::move(item->item).TakeString();
}
// Serializes the selected subprotocol into a response header value; the
// name must be a valid sf-token.
absl::StatusOr<std::string> SerializeSubprotocolResponseHeader(
    absl::string_view subprotocol) {
  if (quiche::structured_headers::IsValidToken(subprotocol)) {
    return std::string(subprotocol);
  }
  return absl::InvalidArgumentError("Invalid token value supplied");
}
// Parses a WebTransport-Init header (an sf-dictionary) into a
// WebTransportInitHeader. Recognized keys ("u", "bl", "br") must each be a
// single non-negative integer; unrecognized keys are ignored. Keys absent
// from the header keep the struct's default values.
absl::StatusOr<WebTransportInitHeader> ParseInitHeader(
    absl::string_view header) {
  std::optional<Dictionary> parsed =
      quiche::structured_headers::ParseDictionary(header);
  if (!parsed.has_value()) {
    return absl::InvalidArgumentError(
        "Failed to parse WebTransport-Init header as an sf-dictionary");
  }
  WebTransportInitHeader output;
  // Match every parsed entry against the known-field table;
  // `field_accessor` is a pointer-to-member into WebTransportInitHeader.
  for (const auto& [field_name_a, field_value] : *parsed) {
    for (const auto& [field_name_b, field_accessor] : kInitHeaderFields) {
      if (field_name_a != field_name_b) {
        continue;
      }
      QUICHE_RETURN_IF_ERROR(CheckMemberType(field_value, Item::kIntegerType));
      int64_t value = field_value.member[0].item.GetInteger();
      if (value < 0) {
        return absl::InvalidArgumentError(
            absl::StrCat("Received negative value for ", field_name_a));
      }
      output.*field_accessor = value;
    }
  }
  return output;
}
// Serializes a WebTransportInitHeader into an sf-dictionary header value,
// emitting every field from kInitHeaderFields in table order.
absl::StatusOr<std::string> SerializeInitHeader(
    const WebTransportInitHeader& header) {
  std::vector<DictionaryMember> members;
  members.reserve(kInitHeaderFields.size());
  for (const auto& [field_name, field_accessor] : kInitHeaderFields) {
    // Each value becomes a single bare integer item (no parameters, not an
    // inner list).
    Item item(static_cast<int64_t>(header.*field_accessor));
    members.push_back(std::make_pair(
        field_name, ParameterizedMember({ParameterizedItem(item, {})}, false,
                                        {})));
  }
  std::optional<std::string> result =
      quiche::structured_headers::SerializeDictionary(
          Dictionary(std::move(members)));
  if (!result.has_value()) {
    return absl::InternalError("Failed to serialize the dictionary");
  }
  return *std::move(result);
}
} | #include "quiche/web_transport/web_transport_headers.h"
#include "absl/status/status.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace webtransport {
namespace {
using ::quiche::test::IsOkAndHolds;
using ::quiche::test::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
// Valid sf-lists of tokens parse into their token values (parameters are
// dropped); non-token members and malformed lists are rejected with
// descriptive InvalidArgument errors.
TEST(WebTransportHeaders, ParseSubprotocolRequestHeader) {
  EXPECT_THAT(ParseSubprotocolRequestHeader("test"),
              IsOkAndHolds(ElementsAre("test")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01, moqt-draft02"),
              IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01; a=b, moqt-draft02"),
              IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("moqt-draft01, moqt-draft02; a=b"),
              IsOkAndHolds(ElementsAre("moqt-draft01", "moqt-draft02")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("\"test\""),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found string instead")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("42"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found integer instead")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("a, (b)"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found a nested list instead")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("a, (b c)"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found a nested list instead")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("foo, ?1, bar"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found boolean instead")));
  EXPECT_THAT(ParseSubprotocolRequestHeader("(a"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("parse the header as an sf-list")));
}
// Token lists serialize comma-separated; a non-token entry ("0123") fails.
TEST(WebTransportHeaders, SerializeSubprotocolRequestHeader) {
  EXPECT_THAT(SerializeSubprotocolRequestHeader({"test"}),
              IsOkAndHolds("test"));
  EXPECT_THAT(SerializeSubprotocolRequestHeader({"foo", "bar"}),
              IsOkAndHolds("foo, bar"));
  EXPECT_THAT(SerializeSubprotocolRequestHeader({"moqt-draft01", "a/b/c"}),
              IsOkAndHolds("moqt-draft01, a/b/c"));
  EXPECT_THAT(
      SerializeSubprotocolRequestHeader({"abcd", "0123", "efgh"}),
      StatusIs(absl::StatusCode::kInvalidArgument, "Invalid token: 0123"));
}
// The response header is a single sf-token; parameters are ignored.
TEST(WebTransportHeader, ParseSubprotocolResponseHeader) {
  EXPECT_THAT(ParseSubprotocolResponseHeader("foo"), IsOkAndHolds("foo"));
  EXPECT_THAT(ParseSubprotocolResponseHeader("foo; a=b"), IsOkAndHolds("foo"));
  EXPECT_THAT(
      ParseSubprotocolResponseHeader("1234"),
      StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("found integer")));
  EXPECT_THAT(
      ParseSubprotocolResponseHeader("(a"),
      StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("parse sf-item")));
}
// Serialization only accepts valid tokens ("123abc" starts with a digit).
TEST(WebTransportHeader, SerializeSubprotocolResponseHeader) {
  EXPECT_THAT(SerializeSubprotocolResponseHeader("foo"), IsOkAndHolds("foo"));
  EXPECT_THAT(SerializeSubprotocolResponseHeader("moqt-draft01"),
              IsOkAndHolds("moqt-draft01"));
  EXPECT_THAT(SerializeSubprotocolResponseHeader("123abc"),
              StatusIs(absl::StatusCode::kInvalidArgument));
}
// Covers duplicate keys (last wins), ignored parameters, wrong value types,
// negative values, and integer overflow of the sf-integer range.
TEST(WebTransportHeader, ParseInitHeader) {
  WebTransportInitHeader expected_header;
  expected_header.initial_unidi_limit = 100;
  expected_header.initial_incoming_bidi_limit = 200;
  expected_header.initial_outgoing_bidi_limit = 400;
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100"),
              IsOkAndHolds(expected_header));
  EXPECT_THAT(ParseInitHeader("br=300, bl=200, u=100, br=400"),
              IsOkAndHolds(expected_header));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200; foo=bar, u=100"),
              IsOkAndHolds(expected_header));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100.0"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found decimal instead")));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=?1"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found boolean instead")));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=(a b)"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found a nested list instead")));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=:abcd:"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("found byte sequence instead")));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=-1"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("negative value")));
  EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=18446744073709551615"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("Failed to parse")));
}
// Fields serialize in kInitHeaderFields order: u, bl, br.
TEST(WebTransportHeaders, SerializeInitHeader) {
  EXPECT_THAT(SerializeInitHeader(WebTransportInitHeader{}),
              IsOkAndHolds("u=0, bl=0, br=0"));
  WebTransportInitHeader test_header;
  test_header.initial_unidi_limit = 100;
  test_header.initial_incoming_bidi_limit = 200;
  test_header.initial_outgoing_bidi_limit = 400;
  EXPECT_THAT(SerializeInitHeader(test_header),
              IsOkAndHolds("u=100, bl=200, br=400"));
}
}
} | absl::StatusOr<WebTransportInitHeader> ParseInitHeader(
absl::string_view header) {
std::optional<Dictionary> parsed =
quiche::structured_headers::ParseDictionary(header);
if (!parsed.has_value()) {
return absl::InvalidArgumentError(
"Failed to parse WebTransport-Init header as an sf-dictionary");
}
WebTransportInitHeader output;
for (const auto& [field_name_a, field_value] : *parsed) {
for (const auto& [field_name_b, field_accessor] : kInitHeaderFields) {
if (field_name_a != field_name_b) {
continue;
}
QUICHE_RETURN_IF_ERROR(CheckMemberType(field_value, Item::kIntegerType));
int64_t value = field_value.member[0].item.GetInteger();
if (value < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Received negative value for ", field_name_a));
}
output.*field_accessor = value;
}
}
return output;
} | TEST(WebTransportHeader, ParseInitHeader) {
WebTransportInitHeader expected_header;
expected_header.initial_unidi_limit = 100;
expected_header.initial_incoming_bidi_limit = 200;
expected_header.initial_outgoing_bidi_limit = 400;
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=300, bl=200, u=100, br=400"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=400, bl=200; foo=bar, u=100"),
IsOkAndHolds(expected_header));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=100.0"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found decimal instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=?1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found boolean instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=(a b)"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found a nested list instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=:abcd:"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("found byte sequence instead")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=-1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("negative value")));
EXPECT_THAT(ParseInitHeader("br=400, bl=200, u=18446744073709551615"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Failed to parse")));
} |
#include "tensorflow/core/framework/op_segment.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// An Item owns every kernel cached under it; release them on teardown.
OpSegment::Item::~Item() {
  for (const auto& [name, kernel] : name_kernel) {
    delete kernel;
  }
}
OpSegment::OpSegment() {}
// The segment owns the per-session Items (created by AddHold).
OpSegment::~OpSegment() {
  for (const auto& [handle, item] : sessions_) {
    delete item;
  }
}
// Looks up (or creates and caches) the kernel for `node_name` in the session
// identified by `session_handle`.  On success, *kernel points to a kernel
// owned by this OpSegment; callers must not delete it.
//
// create_fn may be expensive, so the implementation uses a check / create /
// re-check pattern that runs create_fn without holding mu_.  If two threads
// race to create the same kernel, the loser's kernel is deleted and the
// winner's cached kernel is returned, so all callers observe one canonical
// OpKernel per (session, node) pair.
Status OpSegment::FindOrCreate(const string& session_handle,
                               const string& node_name, OpKernel** kernel,
                               CreateKernelFn create_fn) {
  {
    // Fast path: return the cached kernel if one already exists.
    mutex_lock l(mu_);
    auto item = gtl::FindPtrOrNull(sessions_, session_handle);
    if (item == nullptr) {
      return errors::NotFound("Session ", session_handle, " is not found.");
    }
    *kernel = gtl::FindPtrOrNull(item->name_kernel, node_name);
    if (*kernel != nullptr) {
      return OkStatus();
    }
  }
  // Slow path: construct the kernel outside the lock.
  Status s = create_fn(kernel);
  if (!s.ok()) {
    LOG(ERROR) << "Create kernel failed: " << s;
    return s;
  }
  {
    mutex_lock l(mu_);
    // The session may have been removed while the kernel was being built.
    auto item = gtl::FindPtrOrNull(sessions_, session_handle);
    if (item == nullptr) {
      return errors::NotFound("Session ", session_handle, " is not found.");
    }
    OpKernel** p_kernel = &(item->name_kernel[node_name]);
    if (*p_kernel == nullptr) {
      // We won the race; cache our kernel.
      *p_kernel = *kernel;
    } else {
      // Another thread cached a kernel first; discard ours and return theirs.
      delete *kernel;
      *kernel = *p_kernel;
    }
  }
  return OkStatus();
}
// Adds one hold on `session_handle`, creating its Item on first use.
// (A freshly constructed Item already counts as one hold.)
void OpSegment::AddHold(const string& session_handle) {
  mutex_lock l(mu_);
  auto& slot = sessions_[session_handle];
  if (slot == nullptr) {
    slot = new Item;
  } else {
    ++(slot->num_holds);
  }
}
// Drops one hold on `session_handle`.  When the last hold is released, the
// session's Item (and every kernel it owns) is destroyed.  Removing a hold
// on an unknown session is tolerated and only logged.
void OpSegment::RemoveHold(const string& session_handle) {
  Item* item = nullptr;
  {
    mutex_lock l(mu_);
    auto siter = sessions_.find(session_handle);
    if (siter == sessions_.end()) {
      VLOG(1) << "Session " << session_handle << " is not found.";
      return;
    }
    item = siter->second;
    if (--(item->num_holds) > 0) {
      return;
    } else {
      sessions_.erase(siter);
    }
  }
  // Deleted after releasing mu_, so the Item destructor (which deletes all
  // cached kernels) does not run inside the critical section.
  delete item;
}
// Returns true if OpSegment should own the kernel for `node_op`: stateful
// primitive ops only — functions in the library and (Stateful)PartitionedCall
// are excluded.
bool OpSegment::ShouldOwnKernel(FunctionLibraryRuntime* lib,
                                const string& node_op) {
  if (!lib->IsStateful(node_op)) {
    return false;
  }
  if (lib->GetFunctionLibraryDefinition()->Find(node_op) != nullptr) {
    return false;
  }
  return node_op != "PartitionedCall" && node_op != "StatefulPartitionedCall";
}
} | #include "tensorflow/core/framework/op_segment.h"
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
// Fixture providing ten int32 and ten float "Mul" NodeDefs (named op0..op9)
// plus helpers to build kernels for them on a CPU device.
class OpSegmentTest : public ::testing::Test {
 protected:
  DeviceBase device_;
  std::vector<NodeDef> int32_nodedefs_;
  std::vector<NodeDef> float_nodedefs_;
  OpSegmentTest() : device_(Env::Default()) {
    for (int i = 0; i < 10; ++i) {
      NodeDef def;
      TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
                      .Input("x", 0, DT_INT32)
                      .Input("y", 0, DT_INT32)
                      .Finalize(&def));
      int32_nodedefs_.push_back(def);
      TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
                      .Input("x", 0, DT_FLOAT)
                      .Input("y", 0, DT_FLOAT)
                      .Finalize(&def));
      float_nodedefs_.push_back(def);
    }
  }
  // Asserts that `op` matches `expected` and has two inputs / one output of
  // type `dt`.
  void ValidateOpAndTypes(OpKernel* op, const NodeDef& expected, DataType dt) {
    ASSERT_NE(op, nullptr);
    EXPECT_EQ(expected.DebugString(), op->def().DebugString());
    EXPECT_EQ(2, op->num_inputs());
    EXPECT_EQ(dt, op->input_type(0));
    EXPECT_EQ(dt, op->input_type(1));
    EXPECT_EQ(1, op->num_outputs());
    EXPECT_EQ(dt, op->output_type(0));
  }
  // Returns a CreateKernelFn that builds a CPU kernel for `ndef`.
  OpSegment::CreateKernelFn GetFn(const NodeDef* ndef) {
    return [this, ndef](OpKernel** kernel) {
      Status s;
      auto created = CreateOpKernel(DEVICE_CPU, &device_, cpu_allocator(),
                                    *ndef, TF_GRAPH_DEF_VERSION, &s);
      if (s.ok()) {
        *kernel = created.release();
      }
      return s;
    };
  }
};
// Kernels created once per (session, node) are returned from the cache on
// subsequent lookups; the second round's create_fn must never be called.
TEST_F(OpSegmentTest, Basic) {
  OpSegment opseg;
  OpKernel* op;
  opseg.AddHold("A");
  opseg.AddHold("B");
  for (int i = 0; i < 10; ++i) {
    auto* ndef = &float_nodedefs_[i];
    TF_EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
    ValidateOpAndTypes(op, *ndef, DT_FLOAT);
    ndef = &int32_nodedefs_[i];
    TF_EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
    ValidateOpAndTypes(op, *ndef, DT_INT32);
  }
  auto reterr = [](OpKernel** kernel) {
    return errors::Internal("Should not be called");
  };
  for (int i = 0; i < 10; ++i) {
    TF_EXPECT_OK(
        opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
    ValidateOpAndTypes(op, float_nodedefs_[i], DT_FLOAT);
    TF_EXPECT_OK(
        opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
    ValidateOpAndTypes(op, int32_nodedefs_[i], DT_INT32);
  }
  opseg.RemoveHold("A");
  opseg.RemoveHold("B");
}
// FindOrCreate on a session with no hold reports NotFound.
TEST_F(OpSegmentTest, SessionNotFound) {
  OpSegment opseg;
  OpKernel* op;
  NodeDef def = float_nodedefs_[0];
  Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
  EXPECT_TRUE(errors::IsNotFound(s)) << s;
}
// A create_fn failure (unknown op) propagates out of FindOrCreate.
TEST_F(OpSegmentTest, CreateFailure) {
  OpSegment opseg;
  OpKernel* op;
  NodeDef def = float_nodedefs_[0];
  def.set_op("nonexistop");
  opseg.AddHold("A");
  Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
  EXPECT_TRUE(errors::IsNotFound(s)) << s;
  opseg.RemoveHold("A");
}
// Holds nest: the kernel stays valid until the final RemoveHold; removing a
// hold on an unknown session ("null") is a harmless no-op.
TEST_F(OpSegmentTest, AddRemoveHolds) {
  OpSegment opseg;
  OpKernel* op;
  const auto& ndef = int32_nodedefs_[0];
  opseg.RemoveHold("null");
  opseg.AddHold("foo");
  TF_EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
  opseg.AddHold("foo");
  opseg.RemoveHold("foo");
  ValidateOpAndTypes(op, ndef, DT_INT32);
  opseg.RemoveHold("foo");
}
} | void OpSegment::AddHold(const string& session_handle) {
mutex_lock l(mu_);
Item** item = &sessions_[session_handle];
if (*item == nullptr) {
*item = new Item;
} else {
++((*item)->num_holds);
}
} | TEST_F(OpSegmentTest, Basic) {
OpSegment opseg;
OpKernel* op;
opseg.AddHold("A");
opseg.AddHold("B");
for (int i = 0; i < 10; ++i) {
auto* ndef = &float_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_FLOAT);
ndef = &int32_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_INT32);
}
auto reterr = [](OpKernel** kernel) {
return errors::Internal("Should not be called");
};
for (int i = 0; i < 10; ++i) {
TF_EXPECT_OK(
opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, float_nodedefs_[i], DT_FLOAT);
TF_EXPECT_OK(
opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, int32_nodedefs_[i], DT_INT32);
}
opseg.RemoveHold("A");
opseg.RemoveHold("B");
}
TEST_F(OpSegmentTest, CreateFailure) {
OpSegment opseg;
OpKernel* op;
NodeDef def = float_nodedefs_[0];
def.set_op("nonexistop");
opseg.AddHold("A");
Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
EXPECT_TRUE(errors::IsNotFound(s)) << s;
opseg.RemoveHold("A");
}
TEST_F(OpSegmentTest, AddRemoveHolds) {
OpSegment opseg;
OpKernel* op;
const auto& ndef = int32_nodedefs_[0];
opseg.RemoveHold("null");
opseg.AddHold("foo");
TF_EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
opseg.AddHold("foo");
opseg.RemoveHold("foo");
ValidateOpAndTypes(op, ndef, DT_INT32);
opseg.RemoveHold("foo");
} |
#ifndef TENSORFLOW_TSL_PROFILER_LIB_TRACEME_ENCODE_H_
#define TENSORFLOW_TSL_PROFILER_LIB_TRACEME_ENCODE_H_
#include <string.h>
#include <initializer_list>
#include <string>
#include "absl/base/attributes.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace profiler {
// A key/value annotation attached to a TraceMe event.
//
// The value is stored as a string_view into the AlphaNum's buffer, so a
// TraceMeArg must not outlive the full expression it is constructed in;
// ABSL_ATTRIBUTE_LIFETIME_BOUND lets supporting compilers diagnose dangling
// uses.  Copying is deleted to prevent the borrowed view from escaping.
struct TraceMeArg {
  TraceMeArg(absl::string_view k,
             const absl::AlphaNum& v ABSL_ATTRIBUTE_LIFETIME_BOUND)
      : key(k), value(v.Piece()) {}
  TraceMeArg(const TraceMeArg&) = delete;
  void operator=(const TraceMeArg&) = delete;
  absl::string_view key;
  absl::string_view value;
};
namespace traceme_internal {
// Copies `str` into `out` and returns the position one past the copied
// bytes.  '#' is the TraceMe metadata delimiter, so it must not appear in
// keys or values (checked in debug builds only).
TF_ATTRIBUTE_ALWAYS_INLINE inline char* Append(char* out,
                                               absl::string_view str) {
  DCHECK(!absl::StrContains(str, '#'))
      << "'#' is not a valid character in TraceMeEncode";
  if (TF_PREDICT_TRUE(!str.empty())) {
    memcpy(out, str.data(), str.size());
    out += str.size();
  }
  return out;
}
// Appends `args` to `name` in TraceMe metadata format:
// "name#k1=v1,k2=v2,...,kn=vn#".  Returns `name` unchanged when args is
// empty.
//
// The final size is computed exactly up front so the string is resized once
// and filled with raw pointer writes: each arg contributes key + '=' +
// value + ',' (two punctuation bytes per arg) plus one leading '#'; the
// trailing ',' of the last arg is then overwritten with the closing '#'.
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string AppendArgs(
    std::string name, std::initializer_list<TraceMeArg> args) {
  if (TF_PREDICT_TRUE(args.size() > 0)) {
    const auto old_size = name.size();
    auto new_size = old_size + args.size() * 2 + 1;
    for (const auto& arg : args) {
      new_size += arg.key.size() + arg.value.size();
    }
    name.resize(new_size);
    char* const begin = &name[0];
    char* out = begin + old_size;
    *out++ = '#';
    for (const auto& arg : args) {
      out = Append(out, arg.key);
      *out++ = '=';
      out = Append(out, arg.value);
      *out++ = ',';
    }
    // Replace the final ',' with the closing '#'.
    *(out - 1) = '#';
    DCHECK_EQ(out, begin + new_size);
  }
  return name;
}
// Appends an encoded "#...#" metadata block to `name`, fusing it with an
// existing trailing block: "a#x=1#" + "#y=2#" becomes "a#x=1,y=2#" rather
// than two adjacent blocks.  Empty metadata is a no-op.
TF_ATTRIBUTE_ALWAYS_INLINE inline void AppendMetadata(
    std::string* name, absl::string_view new_metadata) {
  if (!TF_PREDICT_FALSE(new_metadata.empty())) {
    if (!name->empty() && name->back() == '#') {
      // `name` already ends in a metadata block: turn its closing '#' into a
      // ',' separator and drop new_metadata's opening '#'.
      name->back() = ',';
      if (TF_PREDICT_TRUE(new_metadata.front() == '#')) {
        new_metadata.remove_prefix(1);
      }
    }
    name->append(new_metadata.data(), new_metadata.size());
  }
}
}
// Encodes an event name plus metadata args into TraceMe's string format:
// "name#k1=v1,k2=v2#".  Overloads take the name by value (moved),
// string_view, or C string.
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode(
    std::string name, std::initializer_list<TraceMeArg> args) {
  return traceme_internal::AppendArgs(std::move(name), args);
}
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode(
    absl::string_view name, std::initializer_list<TraceMeArg> args) {
  return traceme_internal::AppendArgs(std::string(name), args);
}
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode(
    const char* name, std::initializer_list<TraceMeArg> args) {
  return traceme_internal::AppendArgs(std::string(name), args);
}
// Encodes metadata args with no event name, yielding just "#...#".
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode(
    std::initializer_list<TraceMeArg> args) {
  return traceme_internal::AppendArgs(std::string(), args);
}
// Builds the "op_name:op_type" event name used for TF op events.
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp(
    absl::string_view op_name, absl::string_view op_type) {
  return absl::StrCat(op_name, ":", op_type);
}
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp(const char* op_name,
                                                        const char* op_type) {
  return absl::StrCat(op_name, ":", op_type);
}
// Rvalue overload: appends in place, reusing op_name's buffer.
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOp(
    std::string&& op_name, absl::string_view op_type) {
  absl::StrAppend(&op_name, ":", op_type);
  return op_name;
}
// Builds a "#tf_op=op_name:op_type#" metadata annotation.
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOpOverride(
    absl::string_view op_name, absl::string_view op_type) {
  return absl::StrCat("#tf_op=", op_name, ":", op_type, "#");
}
TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeOpOverride(
    const char* op_name, const char* op_type) {
  return absl::StrCat("#tf_op=", op_name, ":", op_type, "#");
}
}
}
#endif | #include "tsl/profiler/lib/traceme_encode.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace profiler {
namespace {
// No args: the name passes through with no '#' block appended.
TEST(TraceMeEncodeTest, NoArgTest) {
  EXPECT_EQ(TraceMeEncode("Hello!", {}), "Hello!");
}
TEST(TraceMeEncodeTest, OneArgTest) {
  EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}}),
            "Hello#context=World#");
}
TEST(TraceMeEncodeTest, TwoArgsTest) {
  EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}, {"request_id", 42}}),
            "Hello#context=World,request_id=42#");
}
// Values go through absl::AlphaNum, so absl::Hex formatting works too.
TEST(TraceMeEncodeTest, ThreeArgsTest) {
  EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"},
                                    {"request_id", 42},
                                    {"addr", absl::Hex(0xdeadbeef)}}),
            "Hello#context=World,request_id=42,addr=deadbeef#");
}
#if !defined(PLATFORM_WINDOWS)
// Temporaries are safe for the duration of the full expression.
TEST(TraceMeEncodeTest, TemporaryStringTest) {
  EXPECT_EQ(TraceMeEncode("Hello", {{std::string("context"),
                                     absl::StrCat("World:", 2020)}}),
            "Hello#context=World:2020#");
}
#endif
#if defined(PLATFORM_GOOGLE)
struct Point {
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Point& p) {
    absl::Format(&sink, "(%d, %d)", p.x, p.y);
  }
  int x;
  int y;
};
// Types with AbslStringify format through AlphaNum as values.
TEST(TraceMeEncodeTest, AbslStringifyTest) {
  EXPECT_EQ(TraceMeEncode("Plot", {{"point", Point{10, 20}}}),
            "Plot#point=(10, 20)#");
}
#endif
// The nameless overload yields a bare "#...#" block.
TEST(TraceMeEncodeTest, NoNameTest) {
  EXPECT_EQ(TraceMeEncode({{"context", "World"}, {"request_id", 42}}),
            "#context=World,request_id=42#");
}
}
void BM_TraceMeEncode(::testing::benchmark::State& state) {
  for (auto s : state) {
    TraceMeEncode(
        "MyTestEvent",
        {{"Lorem ipsum dolor sit amet", 1},
         {"consectetur adipiscing elit", 2},
         {"sed do eiusmod tempor incididunt", 3.52},
         {"ut labore et dolore magna aliqua", "Ut enim ad minim veniam"},
         {"quis nostrud exercitation ullamco", "laboris nisi ut aliquip ex"},
         {"ea commodo consequat.", 11111.1111},
         {"Duis aute", 1234567890},
         {"irure dolor in", " reprehenderit in voluptate"},
         {"velit esse cillum dolore", "eu fugiat nulla pariatur."},
         {"Excepteur sint", "occaecat cupidatat non proident, sunt in"},
         {"culpa qui officia", "deserunt mollit anim id est laborum."}});
  }
}
BENCHMARK(BM_TraceMeEncode);
}
} | TF_ATTRIBUTE_ALWAYS_INLINE inline std::string TraceMeEncode(
absl::string_view name, std::initializer_list<TraceMeArg> args) {
return traceme_internal::AppendArgs(std::string(name), args);
} | TEST(TraceMeEncodeTest, OneArgTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}}),
"Hello#context=World#");
}
TEST(TraceMeEncodeTest, TwoArgsTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"}, {"request_id", 42}}),
"Hello#context=World,request_id=42#");
}
TEST(TraceMeEncodeTest, ThreeArgsTest) {
EXPECT_EQ(TraceMeEncode("Hello", {{"context", "World"},
{"request_id", 42},
{"addr", absl::Hex(0xdeadbeef)}}),
"Hello#context=World,request_id=42,addr=deadbeef#");
} |
#include "xla/tools/xla_compile_lib.h"
#include <cmath>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/duration.pb.h"
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "stablehlo/dialect/Register.h"
#include "xla/client/xla_computation.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/service/compiler.h"
#include "xla/service/cpu/cpu_compiler.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/executable.h"
#include "xla/service/export_hlo.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/symbol_repository.h"
#include "xla/service/xla_compile_result.pb.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/gpu_symbol_repository.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#endif
#if GOOGLE_CUDA
#include "xla/service/gpu/nvptx_compiler.h"
#elif TENSORFLOW_USE_ROCM
#include "xla/service/gpu/amdgpu_compiler.h"
#endif
namespace xla {
// Ahead-of-time compiles `hlo_module` for CPU and returns the serialized
// AotCompilationResult.
static absl::StatusOr<std::string> AotCompileCpuExecutable(
    std::unique_ptr<HloModule> hlo_module) {
  cpu::CpuCompiler compiler;
  auto group = std::make_unique<HloModuleGroup>(std::move(hlo_module));
  // Compiled without a stream executor or device allocator (both null): AOT
  // compilation does not require a live device.
  TF_ASSIGN_OR_RETURN(std::vector<std::unique_ptr<Executable>> executables,
                      compiler.Compile(std::move(group), {{nullptr}},
                                       {nullptr}));
  TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
                      compiler.Export(executables[0].get()));
  return aot_result->SerializeAsString();
}
// Compiles `hlo_module` for GPU.  If `target_config` is provided, the module
// is compiled ahead of time (no attached device required) and the serialized
// AotCompilationResult is returned; otherwise it is compiled on device 0 of
// the attached GPU and the optimized module's text form is returned.  In
// both cases `result` receives the optimized HLO proto.  Built without
// CUDA/ROCm, logs an error and returns an empty string.
static absl::StatusOr<std::string> CompileGpuExecutable(
    std::unique_ptr<HloModule> hlo_module,
    std::optional<Compiler::TargetConfig> target_config,
    CompilationResult& result) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
  const bool aot = target_config.has_value();
#if GOOGLE_CUDA
  auto gpu_compiler = gpu::NVPTXCompiler();
#elif TENSORFLOW_USE_ROCM
  auto gpu_compiler = gpu::AMDGPUCompiler();
#endif
  auto module_group = std::make_unique<HloModuleGroup>(std::move(hlo_module));
  if (aot) {
    AotCompilationOptions aot_options(gpu_compiler.PlatformId());
    aot_options.set_target_config(*target_config);
    // run_backend_only skips the HLO optimization pipeline — presumably the
    // input module is already optimized; confirm with callers.
    aot_options.set_run_backend_only(true);
    TF_ASSIGN_OR_RETURN(
        std::vector<std::unique_ptr<AotCompilationResult>> aot_results,
        gpu_compiler.CompileAheadOfTime(std::move(module_group), aot_options));
    TF_ASSIGN_OR_RETURN(std::string compile_result,
                        aot_results[0]->SerializeAsString());
    *result.mutable_hlo_module() =
        aot_results[0]->optimized_module()->ToProto();
    return compile_result;
  }
  // JIT path: compile on the first attached GPU device.
  Compiler::CompileOptions compile_options;
  TF_RETURN_IF_ERROR(stream_executor::ValidateGPUMachineManager());
  TF_ASSIGN_OR_RETURN(
      stream_executor::StreamExecutor * stream_executor,
      stream_executor::GPUMachineManager()->ExecutorForDevice(0));
  auto allocator =
      std::make_unique<stream_executor::StreamExecutorMemoryAllocator>(
          stream_executor);
  compile_options.device_allocator = allocator.get();
  TF_ASSIGN_OR_RETURN(
      std::vector<std::unique_ptr<Executable>> executables,
      gpu_compiler.Compile(std::move(module_group), {{stream_executor}},
                           compile_options));
  *result.mutable_hlo_module() = executables[0]->module().ToProto();
  return executables[0]->module().ToString();
#else
  LOG(ERROR) << "Neither ROCm nor CUDA present; returning empty.";
  return "";
#endif
}
// Compiles `hlo_module` for `backend` and returns the serialized artifact.
// CPU is always compiled ahead of time; GPU is AOT when `target_config` is
// set and compiled on the attached device otherwise.  `result` receives
// backend-specific metadata (the optimized HLO proto on GPU).
absl::StatusOr<std::string> CompileExecutable(
    std::unique_ptr<HloModule> hlo_module, BackendType backend,
    std::optional<Compiler::TargetConfig> target_config,
    CompilationResult& result) {
  if (backend != BackendType::kCpu) {
    return CompileGpuExecutable(std::move(hlo_module),
                                std::move(target_config), result);
  }
  return AotCompileCpuExecutable(std::move(hlo_module));
}
// Stamps the accumulated compilation time from `stats` onto
// `compilation_result` and writes it as a binary proto to
// `result_output_file`.  No-op when the path is empty.
absl::Status WriteResultFile(const absl::string_view result_output_file,
                             TimerStats& stats,
                             CompilationResult& compilation_result) {
  if (result_output_file.empty()) {
    return absl::OkStatus();
  }
  absl::MutexLock ml(&stats.stats_mutex);
  // Split cumulative seconds into whole seconds + fractional nanoseconds.
  const double whole_secs = std::floor(stats.cumulative_secs);
  const double frac_nanos =
      (stats.cumulative_secs - whole_secs) * tsl::EnvTime::kSecondsToNanos;
  google::protobuf::Duration duration;
  duration.set_seconds(whole_secs);
  duration.set_nanos(frac_nanos);
  auto* perf_stats = compilation_result.mutable_perf_stats();
  *perf_stats->mutable_compilation_duration() = duration;
  *perf_stats->mutable_total_duration() = duration;
  return tsl::WriteBinaryProto(
      tsl::Env::Default(), std::string(result_output_file), compilation_result);
}
// Loads an HloModule from `module_path`.  Files with extension "hlo", "txt"
// or "pb" are loaded directly as HLO text/proto; any other extension is
// parsed as MLIR (StableHLO/MHLO/func/arith dialects) and converted to HLO.
//
// Returns InvalidArgument when the MLIR input cannot be parsed, and
// propagates errors from file I/O and the MLIR-to-HLO conversion.
absl::StatusOr<std::unique_ptr<HloModule>> LoadModule(
    const absl::string_view module_path) {
  auto format = std::string(tsl::io::Extension(module_path));
  if (format == "hlo" || format == "txt" || format == "pb") {
    return LoadModuleFromFile(
        std::string(module_path), format, hlo_module_loader_details::Config(),
        [&](HloModuleConfig* c) {}, nullptr);
  }
  std::string module_string;
  TF_RETURN_IF_ERROR(tsl::ReadFileToString(
      tsl::Env::Default(), std::string(module_path), &module_string));
  mlir::DialectRegistry dialects;
  dialects.insert<mlir::arith::ArithDialect>();
  dialects.insert<mlir::mhlo::MhloDialect>();
  dialects.insert<mlir::func::FuncDialect>();
  mlir::stablehlo::registerAllDialects(dialects);
  auto threading = mlir::MLIRContext::Threading::DISABLED;
  auto ctx = std::make_unique<mlir::MLIRContext>(dialects, threading);
  mlir::OwningOpRef<mlir::ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(module_string, ctx.get());
  // parseSourceString returns a null OwningOpRef on parse failure; without
  // this check, `*module` below would dereference null and crash on
  // malformed input.
  if (!module) {
    return absl::InvalidArgumentError(
        absl::StrCat("Failed to parse MLIR module from ", module_path));
  }
  XlaComputation xla_computation;
  TF_RETURN_IF_ERROR(
      MlirToXlaComputation(*module, xla_computation, false, false));
  HloModuleProto hlo_module_proto = xla_computation.proto();
  TF_ASSIGN_OR_RETURN(ProgramShape shape, xla_computation.GetProgramShape());
  DebugOptions debug_options = GetDebugOptionsFromFlags();
  HloModuleConfig config(shape);
  config.set_debug_options(debug_options);
  return HloModule::CreateFromProto(hlo_module_proto, config);
}
// Looks up `symbol_reference` in `symbol_repo` for `backend`, converting a
// null lookup result into a NotFound error so callers can assume a non-null
// module on success.
static absl::StatusOr<std::unique_ptr<HloModuleAndMetadata>>
ReadModuleFromSymbolRepo(absl::string_view symbol_repo,
                         absl::string_view symbol_reference,
                         BackendType backend) {
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloModuleAndMetadata> mod,
      LookupSymbolInRepository(symbol_repo, symbol_reference, backend));
  if (mod == nullptr) {
    return absl::NotFoundError(
        absl::StrCat("Could not find ", symbol_reference, " in ", symbol_repo));
  }
  return mod;
}
// Loads autotuning results embedded in `mod` into the process-wide GPU
// autotuner cache.  Returns true iff results were present and loaded.
// Always returns false for non-GPU backends and in CUDA/ROCm-free builds.
static absl::StatusOr<bool> LoadAutotuneDataFromModule(
    HloModuleAndMetadata* mod, BackendType backend) {
  if (backend == BackendType::kGpu) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
    // backend_specific_data is assumed to be GpuBackendSpecificData for GPU
    // modules (unchecked static_cast) — TODO confirm with the repository
    // implementation.
    if (auto* data = static_cast<gpu::GpuBackendSpecificData*>(
            mod->backend_specific_data.get());
        data != nullptr && data->autotune_results.has_value()) {
      TF_RETURN_IF_ERROR(
          gpu::AutotunerUtil::LoadAutotuneResults(*data->autotune_results));
      return true;
    }
#endif
  }
  return false;
}
// Takes ownership of the target config stored in `mod`, if any.  Returns
// nullptr for non-GPU backends and in CUDA/ROCm-free builds.
static std::unique_ptr<Compiler::TargetConfig> ReadTargetConfigFromModule(
    HloModuleAndMetadata* mod, BackendType backend) {
  if (backend == BackendType::kGpu) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
    // NOTE(review): `data` is used only as a presence check — the target
    // config is moved out regardless of its contents.  Confirm that gating on
    // backend_specific_data (rather than on target_config itself) is
    // intentional.
    if (auto* data = static_cast<gpu::GpuBackendSpecificData*>(
            mod->backend_specific_data.get());
        data != nullptr) {
      return std::move(mod->target_config);
    }
#endif
  }
  return nullptr;
}
// Entry point for the xla_compile tool.  Loads an HLO/MLIR module (from a
// file or from a symbol repository), compiles it for the requested platform
// ("cpu" or "gpu"), and writes the compiled artifact to options.output_path.
// If options.result_output_file is set, a CompilationResult proto carrying
// the status and timing is written there even when compilation fails (via
// the absl::Cleanup below).
absl::Status XlaCompileMain(const XlaCompileOptions& options) {
  std::unique_ptr<HloModule> hlo_module;
  std::unique_ptr<Compiler::TargetConfig> target_config;
  if (options.platform != "cpu" && options.platform != "gpu") {
    // NOTE(review): message is missing a space after "platform".
    return absl::UnimplementedError(
        absl::StrCat("platform", options.platform, " is not supported"));
  }
  const BackendType backend =
      (options.platform == "gpu" ? BackendType::kGpu : BackendType::kCpu);
  absl::string_view symbol_repo = options.repo_options.symbol_repo;
  // A symbol id takes precedence over a module file on disk.
  if (absl::string_view symbol_id = options.repo_options.symbol_id;
      !symbol_id.empty()) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloModuleAndMetadata> mod,
        ReadModuleFromSymbolRepo(symbol_repo, symbol_id, backend));
    hlo_module = std::move(mod->hlo_module);
    target_config = ReadTargetConfigFromModule(mod.get(), backend);
  } else {
    TF_ASSIGN_OR_RETURN(hlo_module, LoadModule(options.module_path));
  }
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
  bool found_autotune = false;
#endif
  // An optimized module in the repo may carry autotune results that let the
  // GPU compiler skip autotuning.
  if (absl::string_view optimized_symbol_id =
          options.repo_options.optimized_symbol_id;
      !optimized_symbol_id.empty()) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloModuleAndMetadata> optimized_mod,
        ReadModuleFromSymbolRepo(symbol_repo, optimized_symbol_id, backend));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
    TF_ASSIGN_OR_RETURN(found_autotune, LoadAutotuneDataFromModule(
                                            optimized_mod.get(), backend));
#endif
  }
  xla::TimerStats stats;
  xla::ScopedLoggingTimer timer("compilation", true, "xla_compile_main.cc", 1,
                                &stats);
  CompilationResult compilation_result;
  // Runs on every exit path below, recording timing/status to the result
  // file even on error returns.
  absl::Cleanup cleanup([&] {
    timer.StopAndLog();
    if (!options.result_output_file.empty()) {
      TF_QCHECK_OK(WriteResultFile(options.result_output_file, stats,
                                   compilation_result));
    }
  });
  std::optional<Compiler::TargetConfig> cfg = std::nullopt;
  if (backend == BackendType::kGpu) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
    // An explicit target config file overrides any config from the repo.
    if (absl::string_view gpu_target_config_path =
            options.gpu_options.gpu_target_config_path;
        !gpu_target_config_path.empty()) {
      std::string gpu_target_config_string;
      TF_RETURN_IF_ERROR(tsl::ReadFileToString(
          tsl::Env::Default(), std::string(gpu_target_config_path),
          &gpu_target_config_string));
      stream_executor::GpuTargetConfigProto gpu_target_config_proto;
      if (!tsl::protobuf::TextFormat::ParseFromString(
              gpu_target_config_string, &gpu_target_config_proto)) {
        return FailedPrecondition("Failed to parse GpuTargetConfigProto");
      }
      target_config =
          std::make_unique<Compiler::TargetConfig>(gpu_target_config_proto);
      if (absl::string_view autotune_results_path =
              options.gpu_options.autotune_results_path;
          !found_autotune && !autotune_results_path.empty()) {
        TF_RETURN_IF_ERROR(gpu::AutotunerUtil::LoadAutotuneResultsFromFile(
            autotune_results_path));
      }
    }
    // NOTE(review): if use_attached_device is false and no target config was
    // loaded (neither from the repo nor from gpu_target_config_path),
    // `*std::move(target_config)` dereferences a null unique_ptr — confirm
    // callers always supply one of the two in that case.
    cfg = (options.gpu_options.use_attached_device)
              ? std::nullopt
              : std::make_optional(*std::move(target_config));
#endif
  }
  auto result = CompileExecutable(std::move(hlo_module), backend,
                                  std::move(cfg), compilation_result);
  // Record the compilation status in the result proto before bailing out.
  *compilation_result.mutable_status() = tsl::StatusToProto(result.status());
  if (!result.ok()) {
    return result.status();
  }
  TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
                                            options.output_path, *result));
  // Symbol repositories may upload artifacts asynchronously; optionally
  // block until uploads complete.
  if (options.repo_options.wait_for_uploads) {
    MaybeWaitForUploads();
  }
  return absl::OkStatus();
}
} | #include "xla/tools/xla_compile_lib.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "google/protobuf/duration.pb.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/platform_util.h"
#include "xla/service/symbol_repository.h"
#include "xla/service/xla_compile_result.pb.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
#include "tsl/protobuf/status.pb.h"
namespace xla {
namespace {
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Not;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
#if XLA_TEST_BACKEND_CPU
static constexpr absl::string_view kPlatformName = "Host";
#elif XLA_TEST_BACKEND_GPU
static constexpr absl::string_view kPlatformName =
#if TENSORFLOW_USE_ROCM
"ROCM";
#else
"CUDA";
#endif
#endif
// Fixture that loads tools/data/add.hlo into `module_` before each test and
// targets the platform selected at build time via kPlatformName.
class XlaCompileLibTest : public HloTestBase {
 protected:
  XlaCompileLibTest()
      : HloTestBase(*PlatformUtil::GetPlatform(std::string(kPlatformName)),
                    GetReferencePlatform()) {}
  // Reads the shared test HLO file and parses it into a verified module.
  void SetUp() override {
    const std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(),
                                                   "tools", "data", "add.hlo");
    std::string hlo;
    TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), hlo_path, &hlo));
    TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
  }
  // Module under test; individual test cases consume it via std::move.
  std::unique_ptr<HloModule> module_;
};
// CPU compilation should succeed and produce a non-empty serialized result.
TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(CompilesForCpu)) {
  CompilationResult result;
  EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kCpu,
                                std::nullopt, result),
              IsOkAndHolds(Not(IsEmpty())));
}
// GPU compilation using the attached device (no target config supplied).
TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithDevice)) {
  CompilationResult result;
  EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu,
                                std::nullopt, result),
              IsOkAndHolds(Not(IsEmpty())));
  EXPECT_TRUE(result.has_hlo_module()) << result.DebugString();
}
// Device-free GPU compilation: the target description comes from a
// GpuTargetConfigProto read from disk rather than from an attached device.
// Bug fix: the original parsed `target_config` and then passed std::nullopt,
// leaving the proto unused and making this test identical to
// CompilesForGpuWithDevice; pass the parsed config instead.
TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(CompilesForGpuWithoutDevice)) {
  const std::string target_config_path =
      tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service",
                        "xla_aot_compile_test_gpu_target_config.prototxt");
  stream_executor::GpuTargetConfigProto target_config;
  TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), target_config_path,
                                  &target_config));
  CompilationResult result;
  EXPECT_THAT(CompileExecutable(std::move(module_), BackendType::kGpu,
                                Compiler::TargetConfig(target_config), result),
              IsOkAndHolds(Not(IsEmpty())));
  EXPECT_TRUE(result.has_hlo_module()) << result.DebugString();
}
// Unsupported platform strings must be rejected with UNIMPLEMENTED.
TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(ErrorsOnUnexpectedPlatform)) {
  XlaCompileOptions options;
  options.platform = "tpu";
  EXPECT_THAT(XlaCompileMain(options), StatusIs(tsl::error::UNIMPLEMENTED));
}
// Writing the result proto to an unwritable path must surface the error.
TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFilePropagatesErrors)) {
  TimerStats stats;
  CompilationResult result;
  EXPECT_THAT(WriteResultFile("/does/not/exist", stats, result), Not(IsOk()));
}
// WriteResultFile should persist the CompilationResult (with timing stats)
// as a binary proto that round-trips through ReadBinaryProto.
TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(WriteResultFileWritesTheFile)) {
  std::string result_output_file;
  ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&result_output_file));
  TimerStats stats;
  {
    // stats fields are guarded by stats_mutex; hold it while seeding values.
    absl::MutexLock ml(&stats.stats_mutex);
    stats.cumulative_secs = 5.5;
    stats.max_secs = 5.5;
  }
  CompilationResult result;
  google::protobuf::Duration duration;
  duration.set_seconds(5);
  duration.set_nanos(0.5 * tsl::EnvTime::kSecondsToNanos);
  *result.mutable_perf_stats()->mutable_compilation_duration() = duration;
  *result.mutable_perf_stats()->mutable_total_duration() = duration;
  TF_ASSERT_OK(WriteResultFile(result_output_file, stats, result));
  CompilationResult got_result;
  TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_output_file,
                                    &got_result));
  EXPECT_EQ(5, got_result.perf_stats().compilation_duration().seconds());
  EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos,
            got_result.perf_stats().compilation_duration().nanos());
  EXPECT_EQ(5, got_result.perf_stats().total_duration().seconds());
  EXPECT_EQ(0.5 * tsl::EnvTime::kSecondsToNanos,
            got_result.perf_stats().total_duration().nanos());
}
// LoadModule on a missing path must fail rather than crash.
TEST_F(XlaCompileLibTest, LoadModuleErrors) {
  EXPECT_THAT(LoadModule("/does/not/exist"), Not(IsOk()));
}
// LoadModule should accept a text-format HLO module written to disk.
TEST_F(XlaCompileLibTest, LoadModuleLoadsTextFormat) {
  const std::string module_file =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt");
  TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file,
                                      module_->ToString()));
  EXPECT_THAT(LoadModule(module_file), IsOkAndHolds(Not(IsNull())));
}
// End-to-end CPU path through XlaCompileMain: compiles the module from a
// text file and writes both the executable and a result proto reporting OK.
TEST_F(XlaCompileLibTest, DISABLED_ON_GPU(MainForCpu)) {
  const std::string module_file =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt");
  TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file,
                                      module_->ToString()));
  const std::string output_path =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_output");
  const std::string result_file =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "cpu_result.pb");
  XlaCompileOptions options;
  options.module_path = module_file;
  options.output_path = output_path;
  options.platform = "cpu";
  options.result_output_file = result_file;
  TF_EXPECT_OK(XlaCompileMain(options));
  CompilationResult result;
  TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result));
  EXPECT_TRUE(result.has_status());
  EXPECT_EQ(result.status().code(), tensorflow::error::OK);
}
// End-to-end GPU path through XlaCompileMain, compiling against the
// physically attached device (use_attached_device = true).
TEST_F(XlaCompileLibTest, DISABLED_ON_CPU(MainForGpu)) {
  const std::string module_file =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "module.txt");
  TF_ASSERT_OK(tsl::WriteStringToFile(tsl::Env::Default(), module_file,
                                      module_->ToString()));
  const std::string output_path =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_output");
  const std::string result_file =
      tsl::io::JoinPath(tsl::testing::TmpDir(), "gpu_result.pb");
  XlaCompileOptions options;
  options.module_path = module_file;
  options.output_path = output_path;
  options.platform = "gpu";
  options.result_output_file = result_file;
  options.gpu_options.use_attached_device = true;
  TF_EXPECT_OK(XlaCompileMain(options));
  CompilationResult result;
  TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), result_file, &result));
  EXPECT_TRUE(result.has_status());
  EXPECT_EQ(result.status().code(), tensorflow::error::OK);
}
}
} | absl::StatusOr<std::string> CompileExecutable(
std::unique_ptr<HloModule> hlo_module, BackendType backend,
std::optional<Compiler::TargetConfig> target_config,
CompilationResult& result) {
if (backend == BackendType::kCpu) {
return AotCompileCpuExecutable(std::move(hlo_module));
}
return CompileGpuExecutable(std::move(hlo_module), std::move(target_config),
result);
} | |
#include "tensorstore/open_mode.h"
#include <ostream>
#include "absl/status/status.h"
namespace tensorstore {
// Returns the canonical display name for `mode`, or "<unknown>" for values
// outside the defined enumerators.
std::string_view to_string(ReadWriteMode mode) {
  if (mode == ReadWriteMode::dynamic) return "dynamic";
  if (mode == ReadWriteMode::read) return "read";
  if (mode == ReadWriteMode::write) return "write";
  if (mode == ReadWriteMode::read_write) return "read_write";
  return "<unknown>";
}
// Streams the canonical name of `mode`; delegates to to_string so stream
// output and string conversion always agree.
std::ostream& operator<<(std::ostream& os, ReadWriteMode mode) {
  os << to_string(mode);
  return os;
}
// Streams a "|"-separated list of the flag names set in `mode`; writes
// nothing when no flags are set.
std::ostream& operator<<(std::ostream& os, OpenMode mode) {
  const char* sep = "";
  const auto emit_flag = [&](OpenMode flag, const char* name) {
    if (!!(mode & flag)) {
      os << sep << name;
      sep = "|";
    }
  };
  emit_flag(OpenMode::open, "open");
  emit_flag(OpenMode::create, "create");
  emit_flag(OpenMode::delete_existing, "delete_existing");
  emit_flag(OpenMode::assume_metadata, "assume_metadata");
  return os;
}
namespace internal {
// Returns OK iff `mode` includes read capability.
absl::Status ValidateSupportsRead(ReadWriteMode mode) {
  if (!(mode & ReadWriteMode::read)) {
    return absl::InvalidArgumentError("Source does not support reading.");
  }
  return absl::Status();
}
// Returns OK iff `mode` includes write capability.
absl::Status ValidateSupportsWrite(ReadWriteMode mode) {
  if (!(mode & ReadWriteMode::write)) {
    return absl::InvalidArgumentError("Destination does not support writing.");
  }
  return absl::Status();
}
// Returns OK iff `mode` provides every capability in `required_modes`;
// otherwise reports the first missing capability (read checked before write).
absl::Status ValidateSupportsModes(ReadWriteMode mode,
                                   ReadWriteMode required_modes) {
  if ((mode & required_modes) == required_modes) {
    return absl::OkStatus();
  }
  if (!!(required_modes & ReadWriteMode::read) &&
      !(mode & ReadWriteMode::read)) {
    return absl::InvalidArgumentError("Read mode not supported");
  }
  if (!!(required_modes & ReadWriteMode::write) &&
      !(mode & ReadWriteMode::write)) {
    return absl::InvalidArgumentError("Write mode not supported");
  }
  return absl::OkStatus();
}
}
} | #include "tensorstore/open_mode.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::OpenMode;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::StrCat;
static_assert(ReadWriteMode::read_write ==
(ReadWriteMode::read | ReadWriteMode::write));
static_assert((ReadWriteMode::read_write & ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(!ReadWriteMode::dynamic);
static_assert(tensorstore::internal::StaticReadWriteMask(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::write) == ReadWriteMode::write);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::dynamic) == ReadWriteMode::read_write);
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::read));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::write));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::dynamic,
ReadWriteMode::dynamic));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::read, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::write, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::write));
// Streaming a ReadWriteMode prints its canonical name, with "<unknown>" as
// the fallback for out-of-range values.
TEST(ReadWriteModeTest, PrintToOstream) {
  EXPECT_EQ("dynamic", StrCat(ReadWriteMode::dynamic));
  EXPECT_EQ("read", StrCat(ReadWriteMode::read));
  EXPECT_EQ("write", StrCat(ReadWriteMode::write));
  EXPECT_EQ("read_write", StrCat(ReadWriteMode::read_write));
  EXPECT_EQ("<unknown>", StrCat(static_cast<ReadWriteMode>(10)));
}
// Streaming an OpenMode prints "|"-separated flag names, and nothing at all
// when no flags are set.
TEST(OpenTest, PrintToOstream) {
  EXPECT_EQ("", StrCat(OpenMode{}));
  EXPECT_EQ("open", StrCat(OpenMode::open));
  EXPECT_EQ("create", StrCat(OpenMode::create));
  EXPECT_EQ("open|create", StrCat(OpenMode::open | OpenMode::create));
  EXPECT_EQ("open|assume_metadata",
            StrCat(OpenMode::open | OpenMode::assume_metadata));
  EXPECT_EQ("create|delete_existing",
            StrCat(OpenMode::create | OpenMode::delete_existing));
}
} | std::ostream& operator<<(std::ostream& os, ReadWriteMode mode) {
return os << to_string(mode);
} | TEST(ReadWriteModeTest, PrintToOstream) {
EXPECT_EQ("dynamic", StrCat(ReadWriteMode::dynamic));
EXPECT_EQ("read", StrCat(ReadWriteMode::read));
EXPECT_EQ("write", StrCat(ReadWriteMode::write));
EXPECT_EQ("read_write", StrCat(ReadWriteMode::read_write));
EXPECT_EQ("<unknown>", StrCat(static_cast<ReadWriteMode>(10)));
} |
#include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/strings/escaping.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
// Cancellable RPC that asks the group leader to complete group resolution,
// carrying this task's group params and device attributes.
class CompleteGroupCall : public CancellableCall {
 public:
  CompleteGroupCall(const CollGroupParams& group,
                    const DeviceAttributes& device,
                    CancellationManager* cancel_mgr,
                    const string& remote_worker, WorkerCacheInterface* wc)
      : CancellableCall(cancel_mgr, remote_worker, wc) {
    req_.set_group_key(group.group_key);
    req_.set_group_size(group.group_size);
    req_.set_device_type(group.device_type.type_string());
    *req_.mutable_device_attributes() = device;
  }
  ~CompleteGroupCall() override {}
  void IssueCall(const StatusCallback& done) override {
    wi_->CompleteGroupAsync(&opts_, &req_, &resp_, done);
  }
  CompleteGroupRequest req_;
  CompleteGroupResponse resp_;  // Populated by the leader on success.
};
// Cancellable RPC that asks the group leader to complete instance resolution
// (source rank, etc.) for one device's participation in a collective.
class CompleteInstanceCall : public CancellableCall {
 public:
  CompleteInstanceCall(const CollGroupParams& group,
                       const CollInstanceParams& instance,
                       const string& node_name, const string& device_name,
                       bool is_source, CancellationManager* cancel_mgr,
                       const string& remote_worker, WorkerCacheInterface* wc)
      : CancellableCall(cancel_mgr, remote_worker, wc) {
    req_.set_name(node_name);
    req_.set_type(instance.type);
    req_.set_step_id(instance.step_id);
    req_.set_data_type(instance.data_type);
    instance.shape.AsProto(req_.mutable_shape());
    req_.set_group_key(group.group_key);
    req_.set_group_size(group.group_size);
    req_.set_instance_key(instance.instance_key);
    req_.set_device_type(group.device_type.type_string());
    for (int32_t offset : instance.impl_details.subdiv_offsets) {
      req_.add_subdiv_offset(offset);
    }
    req_.set_device(device_name);
    req_.set_is_source(is_source);
  }
  ~CompleteInstanceCall() override {}
  void IssueCall(const StatusCallback& done) override {
    wi_->CompleteInstanceAsync(&opts_, &req_, &resp_, done);
  }
  CompleteInstanceRequest req_;
  CompleteInstanceResponse resp_;  // Populated by the leader on success.
};
}
// `group_leader_` is empty iff this task is itself the group leader; all
// other tasks resolve groups/instances by RPC to the leader task.
CollectiveParamResolverDistributed::CollectiveParamResolverDistributed(
    const ConfigProto& config, const DeviceMgr* dev_mgr,
    DeviceResolverDistributed* dev_resolver,
    NcclCommunicatorInterface* nccl_communicator,
    WorkerCacheInterface* worker_cache, const string& task_name)
    : CollectiveParamResolverLocal(config, dev_mgr, dev_resolver,
                                   nccl_communicator, task_name),
      worker_cache_(worker_cache),
      group_leader_(task_name == config.experimental().collective_group_leader()
                        ? ""
                        : config.experimental().collective_group_leader()) {
  VLOG(1) << "CompleteParamResolverDistributed ctor task={" << task_name
          << "} config.collective_group_leader={"
          << config.experimental().collective_group_leader() << "}"
          << " config.collective_nccl={"
          << config.experimental().collective_nccl() << "}";
}
// Completes group and instance params for `device`.  When the caller asks
// for group initialization, group resolution runs first (possibly through
// the leader), the resolved members' devices are pushed to the device
// resolver, and only then is the instance completed; otherwise the cached
// group is looked up directly.
void CollectiveParamResolverDistributed::CompleteParamsAsync(
    const DeviceAttributes& device, CollectiveParams* cp,
    CancellationManager* cancel_mgr, const StatusCallback& done) {
  VLOG(1) << "CompleteParams distributed " << device.name() << " for " << cp
          << ": " << cp->ToString();
  if (cp->run_group_initialization) {
    CompleteGroupDistributed(
        device, &cp->group, cancel_mgr,
        [this, device, cp, cancel_mgr, done](Status s) {
          if (s.ok()) {
            // Make every group member's device known to the local resolver
            // before instance resolution might need it.
            std::vector<DeviceAttributes> devices;
            devices.reserve(cp->group.group_size);
            for (const CollGroupMember& m : cp->group.members) {
              devices.push_back(m.device);
            }
            s = dev_resolver_->UpdateDeviceAttributes(devices);
          }
          if (s.ok()) {
            CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
          } else {
            done(s);
          }
        });
  } else {
    // The group must already have been resolved (e.g. by a prior op).
    auto s = LookupGroup(cp->group.group_key, &cp->group);
    if (s.ok()) {
      CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
    } else {
      done(s);
    }
  }
}
// RPC entry point for group completion; forwards to the distributed
// implementation which decides between local and leader-mediated resolution.
void CollectiveParamResolverDistributed::CompleteGroupAsync(
    const DeviceAttributes& device, CollGroupParams* group_params,
    CancellationManager* cancel_mgr, const StatusCallback& done) {
  CompleteGroupDistributed(device, group_params, cancel_mgr, done);
}
// RPC entry point for instance completion (this task is the group leader).
// Validates the cached group, builds a ref-counted CollectiveParams from the
// request, resolves the instance, and reports the instance key and source
// rank back in `response`.
// Bug fix: the original allocated the CollectiveParams before validating the
// group under gr->mu, leaking it (never Unref'd) on both early-return error
// paths; allocation now happens only after validation succeeds.
void CollectiveParamResolverDistributed::CompleteInstanceAsync(
    const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
    CancellationManager* cancel_mgr, const StatusCallback& done) {
  GroupRec* gr = GetCachedGroup(request->group_key());
  if (gr == nullptr) {
    done(errors::FailedPrecondition(
        "group ", request->group_key(),
        " not found. This normally means the server has restarted"));
    return;
  }
  CollectiveParams* cp = nullptr;
  {
    mutex_lock l(gr->mu);
    if (!gr->status.ok()) {
      done(gr->status);
      return;
    } else if (gr->group.members.size() != gr->group.group_size) {
      done(errors::FailedPrecondition(
          "group ", request->group_key(),
          " failed to resolve. This normally means the server has restarted"));
      return;
    }
    // Allocate only after validation so the early returns above cannot leak
    // the ref-counted params.
    cp = new CollectiveParams;
    cp->group = gr->group;
  }
  cp->name = request->name();
  cp->instance.type = CollectiveType(request->type());
  cp->instance.instance_key = request->instance_key();
  cp->instance.step_id = request->step_id();
  cp->instance.data_type = request->data_type();
  cp->instance.shape = TensorShape(request->shape());
  cp->is_source = request->is_source();
  for (int32_t offset : request->subdiv_offset()) {
    cp->instance.impl_details.subdiv_offsets.push_back(offset);
  }
  // Releases the reference held by this function once resolution finishes.
  StatusCallback done_and_cleanup = [cp, done](const Status& s) {
    done(s);
    cp->Unref();
  };
  CompleteInstanceDistributed(
      request->device(), cp, cancel_mgr,
      [this, cp, response, done_and_cleanup](Status status) {
        if (status.ok()) {
          // Read the resolved source rank out of the instance record.
          bool created_irec;
          InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
          {
            mutex_lock l(ir->mu);
            status = ir->status;
            if (ir->status.ok()) {
              response->set_instance_key(cp->instance.instance_key);
              response->set_source_rank(ir->source_rank);
            }
          }
        }
        done_and_cleanup(status);
      });
}
// Returns the cached GroupRec for `group_key`, or nullptr if none exists.
CollectiveParamResolverDistributed::GroupRec*
CollectiveParamResolverDistributed::GetCachedGroup(int32_t group_key) {
  mutex_lock l(group_mu_);
  const auto it = group_table_.find(group_key);
  return it == group_table_.end() ? nullptr : it->second.get();
}
// Builds a GroupRec from the leader's CompleteGroupResponse and installs it
// in group_table_ if absent.  If a record already exists, it is kept and only
// checked for a matching communicator key.
Status CollectiveParamResolverDistributed::UpdateGroupCache(
    const CompleteGroupResponse& resp) {
  std::unique_ptr<GroupRec> gr(new GroupRec);
  {
    mutex_lock grl(gr->mu);
    gr->group.device_type = DeviceType(resp.device_type());
    gr->group.group_key = resp.group_key();
    gr->group.group_size = resp.group_size();
    gr->group.num_tasks = resp.num_tasks();
    if (resp.device_attributes().empty()) {
      return errors::Internal(
          "CompleteGroupResponse device_attributes is empty. Make sure you're "
          "running the same version of Tensorflow on all workers.");
    }
    if (resp.device_attributes_size() != gr->group.group_size) {
      return errors::Internal(
          "CompleteGroupResponse group_size doesn't match device_name list");
    }
    gr->group.members.reserve(resp.device_attributes().size());
    for (const DeviceAttributes& device : resp.device_attributes()) {
      CollGroupMember member;
      member.device = device;
      gr->group.members.push_back(std::move(member));
      gr->incarnations_by_device_name[device.name()] = device.incarnation();
    }
    gr->group.runtime_details.communicator_key = resp.communicator_key();
    FinishGroup(gr.get());
  }
  // Insert under group_mu_; if another thread won the race, keep its record
  // and verify consistency below (outside group_mu_, under the record's mu).
  GroupRec* previous_gr = nullptr;
  {
    mutex_lock l(group_mu_);
    auto it = group_table_.find(resp.group_key());
    if (it == group_table_.end()) {
      VLOG(2) << "UpdateGroupCache: communicator_key="
              << absl::CEscape(resp.communicator_key());
      group_table_[gr->group.group_key] = std::move(gr);
    } else {
      previous_gr = it->second.get();
    }
  }
  if (previous_gr != nullptr) {
    mutex_lock grl(previous_gr->mu);
    if (previous_gr->group.runtime_details.communicator_key !=
        resp.communicator_key()) {
      return errors::Internal(
          "UpdateGroupCache: CompleteGroupResponse for group ",
          resp.group_key(),
          " gives communicator_key=", absl::CEscape(resp.communicator_key()),
          " but cache already holds communicator_key=",
          absl::CEscape(previous_gr->group.runtime_details.communicator_key));
    }
  }
  return absl::OkStatus();
}
// Resolves group params.  The leader (group_leader_ empty) and any task that
// already has the group cached resolve locally; otherwise a CompleteGroup RPC
// is sent to the leader and its response is merged into the local cache.
// The call is registered with abortion_cancel_mgr_ so StartAbort cancels it.
void CollectiveParamResolverDistributed::CompleteGroupDistributed(
    const DeviceAttributes& device, CollGroupParams* group_params,
    CancellationManager* cancel_mgr, const StatusCallback& done) {
  VLOG(1) << "CompleteGroupDistributed group_key=" << group_params->group_key
          << " dev: " << device.name()
          << " is_leader=" << (group_leader_.empty());
  if (group_leader_.empty()) {
    return CompleteGroupLocal(device, group_params, cancel_mgr, done);
  } else if (GetCachedGroup(group_params->group_key) == nullptr) {
    CompleteGroupCall* call = new CompleteGroupCall(
        *group_params, device, cancel_mgr, group_leader_, worker_cache_);
    CancellationToken abortion_token =
        abortion_cancel_mgr_.get_cancellation_token();
    bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
        abortion_token, [call] { call->Cancel(); });
    if (already_aborted) {
      done(errors::Cancelled("collective ops already aborted"));
      delete call;
      return;
    }
    call->Start([this, device, group_params, call, cancel_mgr, abortion_token,
                 done](const Status& s) {
      abortion_cancel_mgr_.DeregisterCallback(abortion_token);
      if (s.ok()) {
        // Cache the leader's answer, then finish resolution locally.
        Status status = UpdateGroupCache(call->resp_);
        if (status.ok()) {
          CompleteGroupLocal(device, group_params, cancel_mgr, done);
        } else {
          done(status);
        }
      } else {
        done(s);
      }
      delete call;
    });
    return;
  } else {
    return CompleteGroupLocal(device, group_params, cancel_mgr, done);
  }
}
// Returns true iff an instance record for (group_key, step_id, instance_key)
// already exists in instance_table_.
bool CollectiveParamResolverDistributed::InstanceIsCached(
    int32_t group_key, const CollInstanceParams& instance) {
  mutex_lock l(instance_mu_);
  const auto group_it = instance_table_.find(group_key);
  if (group_it == instance_table_.end()) {
    return false;
  }
  return group_it->second.find({instance.step_id, instance.instance_key}) !=
         group_it->second.end();
}
// Folds the leader's CompleteInstanceResponse into the local InstanceRec:
// records the source rank (erroring on a conflicting value) and marks all
// group members as known.
Status CollectiveParamResolverDistributed::UpdateInstanceCache(
    CollectiveParams* cp, const CompleteInstanceResponse& resp) {
  int32_t source_rank = resp.source_rank();
  bool created_irec;
  InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
  mutex_lock l(ir->mu);
  if (!ir->status.ok()) {
    return ir->status;
  }
  if (ir->source_rank != source_rank) {
    // A previously-recorded non-negative rank must agree with the response.
    if (ir->source_rank >= 0) {
      ir->status = errors::Internal(
          "UpdateInstanceCache: CompleteInstanceResponse for instance ",
          cp->instance.instance_key, " gives source_rank=", source_rank,
          " but cache already holds value=", ir->source_rank);
      return ir->status;
    }
    ir->source_rank = source_rank;
  }
  if (ir->known_count < cp->group.group_size) {
    // The leader's answer implies every member is resolved.
    ir->known_count = cp->group.group_size;
    const int ir_known_size = ir->known.size();
    if (ir_known_size != cp->group.group_size) {
      ir->status = errors::Internal(
          "UpdateInstanceCache:: CompleteInstanceResponse for instance ",
          cp->instance.instance_key, " has known.size()=", ir->known.size(),
          " < group_size=", cp->group.group_size);
      return ir->status;
    }
    for (int i = 0; i < ir_known_size; ++i) {
      ir->known[i] = true;
    }
  }
  return ir->status;
}
// Resolves instance params.  The leader, or any task whose instance is
// already cached, resolves locally; otherwise a CompleteInstance RPC is sent
// to the leader and its response is merged into the local cache first.  The
// call is registered with abortion_cancel_mgr_ so StartAbort cancels it.
void CollectiveParamResolverDistributed::CompleteInstanceDistributed(
    const string& device, CollectiveParams* cp, CancellationManager* cancel_mgr,
    const StatusCallback& done) {
  if (group_leader_.empty()) {
    return CompleteInstanceLocal(device, cp, done);
  } else if (InstanceIsCached(cp->group.group_key, cp->instance)) {
    return CompleteInstanceLocal(device, cp, done);
  } else {
    CompleteInstanceCall* call = new CompleteInstanceCall(
        cp->group, cp->instance, cp->name, device, cp->is_source, cancel_mgr,
        group_leader_, worker_cache_);
    CancellationToken abortion_token =
        abortion_cancel_mgr_.get_cancellation_token();
    bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
        abortion_token, [call] { call->Cancel(); });
    if (already_aborted) {
      done(errors::Cancelled("collective ops already aborted"));
      delete call;
      return;
    }
    call->Start([this, device, cp, call, abortion_token, done](Status s) {
      abortion_cancel_mgr_.DeregisterCallback(abortion_token);
      if (s.ok()) {
        s = UpdateInstanceCache(cp, call->resp_);
      }
      if (s.ok()) {
        CompleteInstanceLocal(device, cp, done);
      } else {
        done(s);
      }
      delete call;
    });
    return;
  }
}
// Aborts all collective resolution with status `s`.  Only the first abort
// takes effect; subsequent calls are logged and ignored so callers observe
// the status that initiated the shutdown.
void CollectiveParamResolverDistributed::StartAbort(const Status& s) {
  {
    mutex_lock l(status_mu_);
    if (status_.ok()) {
      status_ = s;
    } else {
      VLOG(2) << "CollectiveParamResolverDistributed already aborted. Ignoring "
                 "subsequent abortion with status: "
              << s;
      return;
    }
  }
  // Abort local state first, then cancel any outstanding leader RPCs.
  StartAbortLocal(s);
  abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
static std::unique_ptr<Device> NewDevice(const string& type,
const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
// Worker cache stub that answers device-locality queries by issuing a
// GetStatus call to the owning fake worker and scanning its device list.
class FakeCache : public TestWorkerCache {
 public:
  // Locality is never available synchronously in this fake.
  bool GetDeviceLocalityNonBlocking(const string& device,
                                    DeviceLocality* locality) override {
    return false;
  }
  void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
                              StatusCallback done) override {
    string task_name;
    string dev_part;
    if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
      done(errors::Internal("failed to parse device name"));
      return;
    }
    auto it = workers_.find(task_name);
    if (it == workers_.end()) {
      done(errors::Internal("failed to find worker ", task_name));
      return;
    }
    WorkerInterface* wi = it->second;
    GetStatusRequest req;
    GetStatusResponse resp;
    Status status = wi->GetStatus(&req, &resp);
    if (!status.ok()) {
      done(status);
      return;
    }
    // Find the requested device among the worker's reported devices.
    for (const auto& it : resp.device_attributes()) {
      if (it.name() == device) {
        *locality = it.locality();
        done(absl::OkStatus());
        return;
      }
    }
    done(errors::Internal("device not found: ", device));
  }
};
// NCCL communicator stub: hands out a fixed communicator key and completes
// every enqueue immediately with OK.
class FakeNcclCommunicator : public NcclCommunicatorInterface {
 public:
  string GenerateCommunicatorKey() override { return "mock-communicator-key"; }
  void Enqueue(std::shared_ptr<CollectiveContext> col_ctx,
               StatusCallback done) override {
    done(absl::OkStatus());
  }
  void StartAbort(const Status& s) override {}
};
class DeviceResDistTest : public ::testing::Test {
public:
  ~DeviceResDistTest() override {
    // CollectiveParams are ref-counted; release the reference held in cp_.
    for (auto& name_param : cp_) {
      name_param.second->Unref();
    }
  }
protected:
void DefineWorkers(int num_workers, int num_devices,
const string& device_type, bool nccl) {
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
DefineWorker(name, device_type, num_devices, nccl);
}
}
  // Creates one fake worker: its devices, device manager, device resolver,
  // param resolver, and Worker object, and registers it with the fake cache.
  // Task 0 is always configured as the collective group leader.
  void DefineWorker(const string& worker_name, const string& device_type,
                    int num_devices, bool nccl) {
    ConfigProto config;
    config.mutable_experimental()->set_collective_group_leader(
        "/job:worker/replica:0/task:0");
    config.mutable_experimental()->set_collective_nccl(nccl);
    std::vector<std::unique_ptr<Device>> devices;
    for (int i = 0; i < num_devices; ++i) {
      devices.push_back(NewDevice(
          device_type,
          strings::StrCat(worker_name, "/device:", device_type, ":", i)));
    }
    device_mgrs_[worker_name] =
        std::make_unique<StaticDeviceMgr>(std::move(devices));
    // Record the worker's device names for later validation.
    std::vector<string>* dv = &dev_by_task_[worker_name];
    dv->clear();
    for (auto* d : device_mgrs_[worker_name]->ListDevices()) {
      dv->push_back(d->name());
    }
    dev_resolvers_[worker_name] = std::make_unique<DeviceResolverDistributed>(
        device_mgrs_[worker_name].get());
    cp_resolvers_[worker_name] =
        std::make_unique<CollectiveParamResolverDistributed>(
            config, device_mgrs_[worker_name].get(),
            dev_resolvers_[worker_name].get(), &nccl_communicator_, &wc_,
            worker_name);
    auto worker_env = std::make_unique<WorkerEnv>();
    worker_env->env = Env::Default();
    worker_env->device_mgr = device_mgrs_[worker_name].get();
    worker_env->collective_executor_mgr =
        std::make_unique<TestCollectiveExecutorMgr>(
            cp_resolvers_[worker_name].get(), nullptr);
    workers_[worker_name] = std::make_unique<Worker>(worker_env.get());
    // Keep the WorkerEnv alive for the lifetime of the Worker.
    worker_envs_[worker_name] = std::move(worker_env);
    wc_.AddWorker(worker_name, workers_[worker_name].get());
  }
  // Creates one CollectiveParams per device; the device whose global index
  // equals `source_rank` is marked as the broadcast source.
  void DefineCollectiveParams(int num_workers, int num_devices,
                              const string& device_type,
                              CollectiveType coll_type = REDUCTION_COLLECTIVE,
                              int source_rank = 0) {
    for (int wi = 0; wi < num_workers; ++wi) {
      string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
      for (int di = 0; di < num_devices; ++di) {
        int idx = wi * num_devices + di;
        string device_name =
            strings::StrCat(task_name, "/device:", device_type, ":", di);
        cp_[device_name] =
            CreateCollectiveParams(num_workers, num_devices, device_type,
                                   coll_type, idx == source_rank);
      }
    }
  }
CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type,
bool is_source) {
const int kGroupKey = 5;
const int kInstanceKey = 3;
auto* cp = new CollectiveParams();
cp->is_source = is_source;
cp->group.group_key = kGroupKey;
cp->group.group_size = num_workers * num_devices;
cp->group.device_type = DeviceType(device_type);
cp->group.num_tasks = num_workers;
cp->instance.instance_key = kInstanceKey;
cp->instance.type = coll_type;
cp->instance.data_type = DT_FLOAT;
cp->instance.shape = TensorShape({64});
cp->instance.impl_details.subdiv_offsets.push_back(0);
return cp;
}
  // Issues a CompleteParamsAsync request for every CPU device of every
  // worker, after resetting the completion counter.
  void IssueRequests(int num_workers, int num_devices) {
    {
      mutex_lock l(mu_);
      num_done_ = 0;
    }
    int group_size = num_workers * num_devices;
    for (int wi = 0; wi < num_workers; ++wi) {
      string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
      for (int di = 0; di < num_devices; ++di) {
        string device_name = strings::StrCat(task_name, "/device:CPU:", di);
        IssueRequest(task_name, device_name, group_size);
      }
    }
  }
  // Issues one async param-resolution request; the callback records the
  // per-device status and notifies done_ once all group members finish.
  void IssueRequest(const string& task_name, const string& device_name,
                    int group_size) {
    Device* device = nullptr;
    TF_CHECK_OK(device_mgrs_[task_name]->LookupDevice(device_name, &device));
    CollectiveParams* cp = cp_[device_name];
    CollectiveParamResolverDistributed* cp_res = cp_resolvers_[task_name].get();
    CHECK(cp_res);
    cp_res->CompleteParamsAsync(
        device->attributes(), cp, &cm_,
        [this, device_name, group_size](const Status& s) {
          status_[device_name] = s;
          {
            mutex_lock l(mu_);
            ++num_done_;
            if (num_done_ == group_size) {
              done_.notify_all();
            }
          }
        });
  }
  // Waits for all requests to finish, then checks that every device resolved
  // OK with the expected rank/members and that all devices agree with
  // device 0 on membership and communicator key.
  void ValidateCollectiveParams(int num_workers, int num_devices) {
    int device_count = num_workers * num_devices;
    {
      // Block until the last IssueRequest callback signals done_.
      mutex_lock l(mu_);
      if (num_done_ < device_count) {
        done_.wait(l);
      }
    }
    const int dev_count = num_workers * num_devices;
    string dev0 = "/job:worker/replica:0/task:0/device:CPU:0";
    for (int wi = 0; wi < num_workers; ++wi) {
      string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
      for (int di = 0; di < num_devices; ++di) {
        string device_name = strings::StrCat(task_name, "/device:CPU:", di);
        int idx = wi * num_devices + di;
        TF_ASSERT_OK(status_[device_name]);
        EXPECT_EQ(cp_[device_name]->default_rank, idx);
        EXPECT_EQ(cp_[device_name]->group.members.size(), dev_count);
        EXPECT_EQ(cp_[device_name]->group.members[idx].device.name(),
                  device_name);
        EXPECT_EQ(cp_[device_name]->group.members[idx].task, task_name);
        ValidateDeviceResolver(*cp_[device_name], task_name);
        if (idx > 0) {
          // Every participant's view must match device 0's view.
          EXPECT_EQ(cp_[dev0]->group.runtime_details.communicator_key,
                    cp_[device_name]->group.runtime_details.communicator_key);
          for (int i = 0; i < dev_count; ++i) {
            EXPECT_EQ(cp_[dev0]->group.members[i].device.name(),
                      cp_[device_name]->group.members[i].device.name());
            EXPECT_EQ(cp_[dev0]->group.members[i].task,
                      cp_[device_name]->group.members[i].task);
          }
        }
      }
    }
  }
void ValidateDeviceResolver(const CollectiveParams& cp, const string& task) {
for (const CollGroupMember& member : cp.group.members) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolvers_[task]->GetDeviceAttributes(
member.device.name(), &attributes));
}
}
// Re-creates worker `worker_idx` (its devices get a new incarnation, as in
// a process restart) and replaces the devices' CollectiveParams with fresh
// ones, clearing any previously recorded statuses.
void RestartWorker(int worker_idx, int num_workers, int num_devices,
                   const string& device_type, bool nccl,
                   CollectiveType coll_type = REDUCTION_COLLECTIVE,
                   bool is_source = false) {
  string worker_name =
      strings::StrCat("/job:worker/replica:0/task:", worker_idx);
  DefineWorker(worker_name, device_type, num_devices, nccl);
  for (int i = 0; i < num_devices; ++i) {
    string device_name =
        strings::StrCat(worker_name, "/device:", device_type, ":", i);
    // Release the old params before installing replacements to avoid
    // leaking the ref-counted CollectiveParams.
    if (cp_.find(device_name) != cp_.end()) {
      cp_[device_name]->Unref();
    }
    cp_[device_name] = CreateCollectiveParams(
        num_workers, num_devices, device_type, coll_type, is_source);
    status_.erase(device_name);
  }
}
// Test fixture state. Maps below are keyed by task name or device name.
FakeCache wc_;
FakeNcclCommunicator nccl_communicator_;
CancellationManager cm_;
absl::flat_hash_map<string, std::unique_ptr<DeviceMgr>> device_mgrs_;
absl::flat_hash_map<string, std::unique_ptr<DeviceResolverDistributed>>
    dev_resolvers_;
absl::flat_hash_map<string,
                    std::unique_ptr<CollectiveParamResolverDistributed>>
    cp_resolvers_;
absl::flat_hash_map<string, std::vector<string>> dev_by_task_;
absl::flat_hash_map<string, std::unique_ptr<WorkerEnv>> worker_envs_;
absl::flat_hash_map<string, std::unique_ptr<Worker>> workers_;
// Ref-counted params per device; released via Unref() (see RestartWorker).
absl::flat_hash_map<string, CollectiveParams*> cp_;
// Per-device status written by the CompleteParamsAsync callbacks.
absl::flat_hash_map<string, Status> status_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);  // number of completed callbacks
condition_variable done_;          // signaled when the whole group is done
};
// Trivial group: one worker, one device.
TEST_F(DeviceResDistTest, Workers1Devices1) {
  const int num_workers = 1;
  const int num_devices = 1;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// 2x2 group spanning two workers.
TEST_F(DeviceResDistTest, Workers2Devices2) {
  const int num_workers = 2;
  const int num_devices = 2;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// After worker 1 restarts (new device incarnations), re-resolving params
// must fail with FailedPrecondition rather than mixing incarnations.
TEST_F(DeviceResDistTest, DifferentIncarnation) {
  const int num_workers = 2;
  const int num_devices = 1;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  RestartWorker(1, num_workers, num_devices, "CPU", false);
  const string task_name = "/job:worker/replica:0/task:1";
  const string device_name = absl::StrCat(task_name, "/device:CPU:0");
  IssueRequest(task_name, device_name, num_workers * num_devices);
  EXPECT_TRUE(errors::IsFailedPrecondition(status_[device_name]));
}

// Broadcast with the source on rank 0.
TEST_F(DeviceResDistTest, BroadcastSourceRank0) {
  const int num_workers = 2;
  const int num_devices = 2;
  const int source_rank = 0;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
                         source_rank);
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// Broadcast with the source on the last rank.
TEST_F(DeviceResDistTest, BroadcastSourceRank3) {
  const int num_workers = 2;
  const int num_devices = 2;
  const int source_rank = 3;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
                         source_rank);
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// Larger 4x3 group, with the nccl flag enabled.
TEST_F(DeviceResDistTest, Workers4Devices3) {
  const int num_workers = 4;
  const int num_devices = 3;
  DefineWorkers(num_workers, num_devices, "CPU", true);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}
}
} | void CollectiveParamResolverDistributed::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
GroupRec* gr = GetCachedGroup(request->group_key());
if (gr == nullptr) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" not found. This normally means the server has restarted"));
return;
}
CollectiveParams* cp = new CollectiveParams;
{
mutex_lock l(gr->mu);
if (!gr->status.ok()) {
done(gr->status);
return;
} else if (gr->group.members.size() != gr->group.group_size) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" failed to resolve. This normally means the server has restarted"));
return;
}
cp->group = gr->group;
}
cp->name = request->name();
cp->instance.type = CollectiveType(request->type());
cp->instance.instance_key = request->instance_key();
cp->instance.step_id = request->step_id();
cp->instance.data_type = request->data_type();
cp->instance.shape = TensorShape(request->shape());
cp->is_source = request->is_source();
for (int32_t offset : request->subdiv_offset()) {
cp->instance.impl_details.subdiv_offsets.push_back(offset);
}
StatusCallback done_and_cleanup = [cp, done](const Status& s) {
done(s);
cp->Unref();
};
CompleteInstanceDistributed(
request->device(), cp, cancel_mgr,
[this, cp, response, done_and_cleanup](Status status) {
if (status.ok()) {
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
{
mutex_lock l(ir->mu);
status = ir->status;
if (ir->status.ok()) {
response->set_instance_key(cp->instance.instance_key);
response->set_source_rank(ir->source_rank);
}
}
}
done_and_cleanup(status);
});
} | TEST_F(DeviceResDistTest, Workers1Devices1) {
const int num_workers = 1;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
// 2x2 group spanning two workers.
TEST_F(DeviceResDistTest, Workers2Devices2) {
  const int num_workers = 2;
  const int num_devices = 2;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// A restarted worker must surface FailedPrecondition on re-resolution.
TEST_F(DeviceResDistTest, DifferentIncarnation) {
  const int num_workers = 2;
  const int num_devices = 1;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  RestartWorker(1, num_workers, num_devices, "CPU", false);
  const string task_name = "/job:worker/replica:0/task:1";
  const string device_name = absl::StrCat(task_name, "/device:CPU:0");
  IssueRequest(task_name, device_name, num_workers * num_devices);
  EXPECT_TRUE(errors::IsFailedPrecondition(status_[device_name]));
}

// Broadcast with the source on rank 0.
TEST_F(DeviceResDistTest, BroadcastSourceRank0) {
  const int num_workers = 2;
  const int num_devices = 2;
  const int source_rank = 0;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
                         source_rank);
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// Broadcast with the source on the last rank.
TEST_F(DeviceResDistTest, BroadcastSourceRank3) {
  const int num_workers = 2;
  const int num_devices = 2;
  const int source_rank = 3;
  DefineWorkers(num_workers, num_devices, "CPU", false);
  DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
                         source_rank);
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}

// Larger 4x3 group, with the nccl flag enabled.
TEST_F(DeviceResDistTest, Workers4Devices3) {
  const int num_workers = 4;
  const int num_devices = 3;
  DefineWorkers(num_workers, num_devices, "CPU", true);
  DefineCollectiveParams(num_workers, num_devices, "CPU");
  IssueRequests(num_workers, num_devices);
  ValidateCollectiveParams(num_workers, num_devices);
}
#include "eval/compiler/resolver.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "base/kind.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_overload_reference.h"
#include "runtime/function_registry.h"
#include "runtime/type_registry.h"
namespace google::api::expr::runtime {
using ::cel::Value;
// Builds the candidate namespace prefixes for `container` (longest/most
// specific first, ending with the empty prefix) and eagerly indexes every
// enum constant reachable from those prefixes for constant-time lookup in
// FindConstant.
Resolver::Resolver(
    absl::string_view container, const cel::FunctionRegistry& function_registry,
    const cel::TypeRegistry&, cel::ValueManager& value_factory,
    const absl::flat_hash_map<std::string, cel::TypeRegistry::Enumeration>&
        resolveable_enums,
    bool resolve_qualified_type_identifiers)
    : namespace_prefixes_(),
      enum_value_map_(),
      function_registry_(function_registry),
      value_factory_(value_factory),
      resolveable_enums_(resolveable_enums),
      resolve_qualified_type_identifiers_(resolve_qualified_type_identifiers) {
  auto container_elements = absl::StrSplit(container, '.');
  std::string prefix = "";
  namespace_prefixes_.push_back(prefix);
  for (const auto& elem : container_elements) {
    if (elem.empty()) {
      continue;
    }
    // Inserting at the front keeps the prefixes ordered longest-first,
    // which is the resolution order used by FullyQualifiedNames.
    absl::StrAppend(&prefix, elem, ".");
    namespace_prefixes_.insert(namespace_prefixes_.begin(), prefix);
  }
  for (const auto& prefix : namespace_prefixes_) {
    for (auto iter = resolveable_enums_.begin();
         iter != resolveable_enums_.end(); ++iter) {
      absl::string_view enum_name = iter->first;
      if (!absl::StartsWith(enum_name, prefix)) {
        continue;
      }
      auto remainder = absl::StripPrefix(enum_name, prefix);
      const auto& enum_type = iter->second;
      for (const auto& enumerator : enum_type.enumerators) {
        // Key is the enum constant's name relative to the matched prefix,
        // e.g. "TestEnum.TEST_ENUM_1".
        auto key = absl::StrCat(remainder, !remainder.empty() ? "." : "",
                                enumerator.name);
        enum_value_map_[key] = value_factory.CreateIntValue(enumerator.number);
      }
    }
  }
}
// Returns the candidate fully-qualified names for `name`, in resolution
// order (most specific container prefix first, bare name last). A leading
// '.' marks the name as absolute, in which case only the stripped name is
// returned. `expr_id` is accepted for signature parity with the other
// lookup helpers; it does not affect the result.
std::vector<std::string> Resolver::FullyQualifiedNames(absl::string_view name,
                                                       int64_t expr_id) const {
  std::vector<std::string> names;
  if (absl::StartsWith(name, ".")) {
    // Absolute reference: strip the leading dot and skip prefix expansion.
    names.push_back(std::string(name.substr(1)));
    return names;
  }
  names.reserve(namespace_prefixes_.size());  // one entry per prefix
  for (const auto& prefix : namespace_prefixes_) {
    names.push_back(absl::StrCat(prefix, name));
  }
  return names;
}
// Resolves `name` to an enum constant or (optionally) a type value, trying
// each candidate fully-qualified name in resolution order. Returns nullopt
// when nothing matches.
absl::optional<cel::Value> Resolver::FindConstant(absl::string_view name,
                                                  int64_t expr_id) const {
  auto names = FullyQualifiedNames(name, expr_id);
  for (const auto& name : names) {
    // Enum constants were pre-indexed by the constructor.
    auto enum_entry = enum_value_map_.find(name);
    if (enum_entry != enum_value_map_.end()) {
      return enum_entry->second;
    }
    // Type identifiers resolve only when qualified type resolution is
    // enabled, or when the name is a simple (dot-free) one like "int".
    if (resolve_qualified_type_identifiers_ || !absl::StrContains(name, ".")) {
      auto type_value = value_factory_.FindType(name);
      if (type_value.ok() && type_value->has_value()) {
        return value_factory_.CreateTypeValue(**type_value);
      }
    }
  }
  return absl::nullopt;
}
// Finds static overloads for `name`, trying each candidate fully-qualified
// name (most specific first) and returning the overloads of the first name
// that has any match; empty when none match.
std::vector<cel::FunctionOverloadReference> Resolver::FindOverloads(
    absl::string_view name, bool receiver_style,
    const std::vector<cel::Kind>& types, int64_t expr_id) const {
  std::vector<cel::FunctionOverloadReference> matches;
  for (const auto& candidate : FullyQualifiedNames(name, expr_id)) {
    matches = function_registry_.FindStaticOverloads(candidate, receiver_style,
                                                     types);
    if (!matches.empty()) {
      break;
    }
  }
  return matches;
}
// Finds lazily-dispatched overloads registered for `name`, trying each
// candidate fully-qualified name and returning the first non-empty match;
// returns an empty vector when none match.
std::vector<cel::FunctionRegistry::LazyOverload> Resolver::FindLazyOverloads(
    absl::string_view name, bool receiver_style,
    const std::vector<cel::Kind>& types, int64_t expr_id) const {
  std::vector<cel::FunctionRegistry::LazyOverload> funcs;
  auto names = FullyQualifiedNames(name, expr_id);
  for (const auto& name : names) {
    funcs = function_registry_.FindLazyOverloads(name, receiver_style, types);
    if (!funcs.empty()) {
      return funcs;
    }
  }
  return funcs;
}
// Resolves `name` to a registered type, trying each candidate
// fully-qualified name in order. Returns the winning qualified name paired
// with the type, nullopt when not found, or propagates an error status if
// the type lookup itself fails.
absl::StatusOr<absl::optional<std::pair<std::string, cel::Type>>>
Resolver::FindType(absl::string_view name, int64_t expr_id) const {
  auto qualified_names = FullyQualifiedNames(name, expr_id);
  for (auto& qualified_name : qualified_names) {
    CEL_ASSIGN_OR_RETURN(auto maybe_type,
                         value_factory_.FindType(qualified_name));
    if (maybe_type.has_value()) {
      return std::make_pair(std::move(qualified_name), std::move(*maybe_type));
    }
  }
  return absl::nullopt;
}
} | #include "eval/compiler/resolver.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_type_registry.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::IntValue;
using ::cel::TypeFactory;
using ::cel::TypeManager;
using ::cel::TypeValue;
using ::cel::ValueManager;
using testing::Eq;
// Minimal zero-argument CelFunction used only to populate the registry in
// these tests; evaluation is a no-op that always succeeds.
class FakeFunction : public CelFunction {
 public:
  explicit FakeFunction(const std::string& name)
      : CelFunction(CelFunctionDescriptor{name, false, {}}) {}
  absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
                        google::protobuf::Arena* arena) const override {
    return absl::OkStatus();
  }
};
// Fixture providing a CEL type registry and a legacy value manager backed
// by reference-counting memory management.
class ResolverTest : public testing::Test {
 public:
  ResolverTest()
      : value_factory_(cel::MemoryManagerRef::ReferenceCounting(),
                       type_registry_.GetTypeProvider()) {}

 protected:
  CelTypeRegistry type_registry_;
  cel::common_internal::LegacyValueManager value_factory_;
};
// A simple name expands to candidates from most to least specific prefix,
// ending with the bare name.
TEST_F(ResolverTest, TestFullyQualifiedNames) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto names = resolver.FullyQualifiedNames("simple_name");
  std::vector<std::string> expected_names(
      {"google.api.expr.simple_name", "google.api.simple_name",
       "google.simple_name", "simple_name"});
  EXPECT_THAT(names, Eq(expected_names));
}

// A partially qualified name is appended to every container prefix.
TEST_F(ResolverTest, TestFullyQualifiedNamesPartiallyQualifiedName) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto names = resolver.FullyQualifiedNames("expr.simple_name");
  std::vector<std::string> expected_names(
      {"google.api.expr.expr.simple_name", "google.api.expr.simple_name",
       "google.expr.simple_name", "expr.simple_name"});
  EXPECT_THAT(names, Eq(expected_names));
}

// A leading '.' bypasses container expansion entirely.
TEST_F(ResolverTest, TestFullyQualifiedNamesAbsoluteName) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto names = resolver.FullyQualifiedNames(".google.api.expr.absolute_name");
  EXPECT_THAT(names.size(), Eq(1));
  EXPECT_THAT(names[0], Eq("google.api.expr.absolute_name"));
}

// Enum constants resolve both relative to the container and absolutely.
TEST_F(ResolverTest, TestFindConstantEnum) {
  CelFunctionRegistry func_registry;
  type_registry_.Register(TestMessage::TestEnum_descriptor());
  Resolver resolver("google.api.expr.runtime.TestMessage",
                    func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto enum_value = resolver.FindConstant("TestEnum.TEST_ENUM_1", -1);
  ASSERT_TRUE(enum_value);
  ASSERT_TRUE(enum_value->Is<IntValue>());
  EXPECT_THAT((*enum_value).As<IntValue>().NativeValue(), Eq(1L));
  enum_value = resolver.FindConstant(
      ".google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_2", -1);
  ASSERT_TRUE(enum_value);
  ASSERT_TRUE(enum_value->Is<IntValue>());
  EXPECT_THAT((*enum_value).As<IntValue>().NativeValue(), Eq(2L));
}
// Simple (dot-free) type names resolve as constants regardless of the
// qualified-type-identifier setting.
TEST_F(ResolverTest, TestFindConstantUnqualifiedType) {
  CelFunctionRegistry func_registry;
  Resolver resolver("cel", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto type_value = resolver.FindConstant("int", -1);
  EXPECT_TRUE(type_value);
  EXPECT_TRUE(type_value->Is<TypeValue>());
  EXPECT_THAT((*type_value).As<TypeValue>().name(), Eq("int"));
}

// Fully qualified message types resolve once a descriptor provider is
// registered.
TEST_F(ResolverTest, TestFindConstantFullyQualifiedType) {
  google::protobuf::LinkMessageReflection<TestMessage>();
  CelFunctionRegistry func_registry;
  type_registry_.RegisterTypeProvider(
      std::make_unique<ProtobufDescriptorProvider>(
          google::protobuf::DescriptorPool::generated_pool(),
          google::protobuf::MessageFactory::generated_factory()));
  Resolver resolver("cel", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto type_value =
      resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1);
  ASSERT_TRUE(type_value);
  ASSERT_TRUE(type_value->Is<TypeValue>());
  EXPECT_THAT((*type_value).As<TypeValue>().name(),
              Eq("google.api.expr.runtime.TestMessage"));
}

// With resolve_qualified_type_identifiers=false, a dotted type name must
// not resolve as a constant.
TEST_F(ResolverTest, TestFindConstantQualifiedTypeDisabled) {
  CelFunctionRegistry func_registry;
  type_registry_.RegisterTypeProvider(
      std::make_unique<ProtobufDescriptorProvider>(
          google::protobuf::DescriptorPool::generated_pool(),
          google::protobuf::MessageFactory::generated_factory()));
  Resolver resolver("", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums(), false);
  auto type_value =
      resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1);
  EXPECT_FALSE(type_value);
}
// FindType resolves a simple name against the container namespace.
TEST_F(ResolverTest, FindTypeBySimpleName) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr.runtime",
                    func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  type_registry_.RegisterTypeProvider(
      std::make_unique<ProtobufDescriptorProvider>(
          google::protobuf::DescriptorPool::generated_pool(),
          google::protobuf::MessageFactory::generated_factory()));
  ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("TestMessage", -1));
  EXPECT_TRUE(type.has_value());
  EXPECT_EQ(type->second->name(), "google.api.expr.runtime.TestMessage");
}

// FindType also accepts an absolute (leading-dot) qualified name.
TEST_F(ResolverTest, FindTypeByQualifiedName) {
  CelFunctionRegistry func_registry;
  type_registry_.RegisterTypeProvider(
      std::make_unique<ProtobufDescriptorProvider>(
          google::protobuf::DescriptorPool::generated_pool(),
          google::protobuf::MessageFactory::generated_factory()));
  Resolver resolver("google.api.expr.runtime",
                    func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  ASSERT_OK_AND_ASSIGN(
      auto type, resolver.FindType(".google.api.expr.runtime.TestMessage", -1));
  ASSERT_TRUE(type.has_value());
  EXPECT_EQ(type->second->name(), "google.api.expr.runtime.TestMessage");
}

// Unknown names yield an empty optional, not an error status.
TEST_F(ResolverTest, TestFindDescriptorNotFound) {
  CelFunctionRegistry func_registry;
  type_registry_.RegisterTypeProvider(
      std::make_unique<ProtobufDescriptorProvider>(
          google::protobuf::DescriptorPool::generated_pool(),
          google::protobuf::MessageFactory::generated_factory()));
  Resolver resolver("google.api.expr.runtime",
                    func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("UndefinedMessage", -1));
  EXPECT_FALSE(type.has_value()) << type->second;
}
// Static overloads are found both by bare name and by container-relative
// (namespaced) name.
TEST_F(ResolverTest, TestFindOverloads) {
  CelFunctionRegistry func_registry;
  auto status =
      func_registry.Register(std::make_unique<FakeFunction>("fake_func"));
  ASSERT_OK(status);
  status = func_registry.Register(
      std::make_unique<FakeFunction>("cel.fake_ns_func"));
  ASSERT_OK(status);
  Resolver resolver("cel", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto overloads =
      resolver.FindOverloads("fake_func", false, ArgumentsMatcher(0));
  EXPECT_THAT(overloads.size(), Eq(1));
  EXPECT_THAT(overloads[0].descriptor.name(), Eq("fake_func"));
  overloads =
      resolver.FindOverloads("fake_ns_func", false, ArgumentsMatcher(0));
  EXPECT_THAT(overloads.size(), Eq(1));
  EXPECT_THAT(overloads[0].descriptor.name(), Eq("cel.fake_ns_func"));
}

// Lazily registered functions resolve through the same namespace rules.
TEST_F(ResolverTest, TestFindLazyOverloads) {
  CelFunctionRegistry func_registry;
  auto status = func_registry.RegisterLazyFunction(
      CelFunctionDescriptor{"fake_lazy_func", false, {}});
  ASSERT_OK(status);
  status = func_registry.RegisterLazyFunction(
      CelFunctionDescriptor{"cel.fake_lazy_ns_func", false, {}});
  ASSERT_OK(status);
  Resolver resolver("cel", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto overloads =
      resolver.FindLazyOverloads("fake_lazy_func", false, ArgumentsMatcher(0));
  EXPECT_THAT(overloads.size(), Eq(1));
  overloads = resolver.FindLazyOverloads("fake_lazy_ns_func", false,
                                         ArgumentsMatcher(0));
  EXPECT_THAT(overloads.size(), Eq(1));
}
}
} | std::vector<std::string> Resolver::FullyQualifiedNames(absl::string_view name,
int64_t expr_id) const {
std::vector<std::string> names;
if (absl::StartsWith(name, ".")) {
std::string fully_qualified_name = std::string(name.substr(1));
names.push_back(fully_qualified_name);
return names;
}
for (const auto& prefix : namespace_prefixes_) {
std::string fully_qualified_name = absl::StrCat(prefix, name);
names.push_back(fully_qualified_name);
}
return names;
} | TEST_F(ResolverTest, TestFullyQualifiedNames) {
CelFunctionRegistry func_registry;
Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto names = resolver.FullyQualifiedNames("simple_name");
std::vector<std::string> expected_names(
{"google.api.expr.simple_name", "google.api.simple_name",
"google.simple_name", "simple_name"});
EXPECT_THAT(names, Eq(expected_names));
}
// A partially qualified name is appended to every container prefix.
TEST_F(ResolverTest, TestFullyQualifiedNamesPartiallyQualifiedName) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto names = resolver.FullyQualifiedNames("expr.simple_name");
  std::vector<std::string> expected_names(
      {"google.api.expr.expr.simple_name", "google.api.expr.simple_name",
       "google.expr.simple_name", "expr.simple_name"});
  EXPECT_THAT(names, Eq(expected_names));
}

// A leading '.' bypasses container expansion entirely.
TEST_F(ResolverTest, TestFullyQualifiedNamesAbsoluteName) {
  CelFunctionRegistry func_registry;
  Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
                    type_registry_.InternalGetModernRegistry(), value_factory_,
                    type_registry_.resolveable_enums());
  auto names = resolver.FullyQualifiedNames(".google.api.expr.absolute_name");
  EXPECT_THAT(names.size(), Eq(1));
  EXPECT_THAT(names[0], Eq("google.api.expr.absolute_name"));
}
#ifndef TENSORFLOW_TSL_LIB_GTL_MAP_UTIL_H_
#define TENSORFLOW_TSL_LIB_GTL_MAP_UTIL_H_
#include <stddef.h>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include "tsl/lib/gtl/subtle/map_traits.h"
namespace tsl {
namespace gtl {
// Returns a const pointer to the value mapped to `key`, or nullptr when the
// key is absent. The pointer is invalidated by mutations of `collection`.
template <class Collection>
const typename Collection::value_type::second_type* FindOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  auto iter = collection.find(key);
  return iter == collection.end() ? nullptr : &iter->second;
}
// Mutable overload: returns a pointer through which the mapped value can be
// modified, or nullptr when the key is absent.
template <class Collection>
typename Collection::value_type::second_type* FindOrNull(
    Collection& collection,
    const typename Collection::value_type::first_type& key) {
  auto iter = collection.find(key);
  if (iter != collection.end()) {
    return &iter->second;
  }
  return nullptr;
}
// Returns the value mapped to `key` by copy, or a value-initialized
// second_type (nullptr for pointer-valued maps) when the key is absent.
template <class Collection>
typename Collection::value_type::second_type FindPtrOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  auto iter = collection.find(key);
  if (iter != collection.end()) {
    return iter->second;
  }
  return typename Collection::value_type::second_type();
}
// Returns a reference to the value mapped to `key`, or to `value` when the
// key is absent. The caller must keep `value` alive while using the result.
template <class Collection>
const typename Collection::value_type::second_type& FindWithDefault(
    const Collection& collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  auto iter = collection.find(key);
  return iter != collection.end() ? iter->second : value;
}
// Inserts `vt`, overwriting the existing mapped value when the key is
// already present. Returns true iff a new entry was inserted.
template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type& vt) {
  auto result = collection->insert(vt);
  const bool inserted = result.second;
  if (!inserted) {
    result.first->second = vt.second;
  }
  return inserted;
}
// Key/value convenience overload; forwards to the value_type overload.
template <class Collection>
bool InsertOrUpdate(Collection* const collection,
                    const typename Collection::value_type::first_type& key,
                    const typename Collection::value_type::second_type& value) {
  const typename Collection::value_type entry(key, value);
  return InsertOrUpdate(collection, entry);
}
// Inserts `vt` only when no equivalent entry exists; returns true iff the
// insertion happened.
template <class Collection>
bool InsertIfNotPresent(Collection* const collection,
                        const typename Collection::value_type& vt) {
  const auto insert_result = collection->insert(vt);
  return insert_result.second;
}
// Key/value convenience overload; forwards to the value_type overload.
template <class Collection>
bool InsertIfNotPresent(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  const typename Collection::value_type entry(key, value);
  return InsertIfNotPresent(collection, entry);
}
// Inserts `vt` when its key is absent, then returns a mutable reference to
// the (pre-existing or newly inserted) mapped value.
template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection, const typename Collection::value_type& vt) {
  auto result = collection->insert(vt);
  return result.first->second;
}
// Key/value convenience overload; forwards to the value_type overload.
template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  const typename Collection::value_type entry(key, value);
  return LookupOrInsert(collection, entry);
}
// Populates *reverse with value->key entries from m (later duplicates
// overwrite earlier ones). Returns true iff all values in m were unique.
template <typename M, typename ReverseM>
bool ReverseMap(const M& m, ReverseM* reverse) {
  bool unique_values = true;
  for (const auto& [key, mapped] : m) {
    if (!InsertOrUpdate(reverse, mapped, key)) {
      unique_values = false;
    }
  }
  return unique_values;
}
// Convenience form returning the reversed map by value; the uniqueness
// result of the two-argument overload is discarded.
template <typename ReverseM, typename M>
ReverseM ReverseMap(const M& m) {
  typename std::remove_const<ReverseM>::type result;
  ReverseMap(m, &result);
  return result;
}
// Erases the entry for `key` and returns its mapped value as produced by
// gtl::subtle::GetMapped (presumably extracting/releasing pointer-like
// ownership — verify in map_traits.h), or nullptr when the key is absent.
// Intended for maps whose mapped type is pointer-like.
template <typename Collection>
typename Collection::value_type::second_type EraseKeyReturnValuePtr(
    Collection* collection,
    const typename Collection::value_type::first_type& key) {
  auto it = collection->find(key);
  if (it == collection->end()) return nullptr;
  // Take the mapped value out before erase() invalidates the iterator.
  auto v = gtl::subtle::GetMapped(*it);
  collection->erase(it);
  return v;
}
}
}
#endif | #include "tsl/lib/gtl/map_util.h"
#include <map>
#include <set>
#include <string>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
// FindWithDefault/FindOrNull return the mapped value once present and the
// supplied default before insertion.
TEST(MapUtil, Find) {
  typedef std::map<string, string> Map;
  Map m;
  EXPECT_EQ("", gtl::FindWithDefault(m, "foo", ""));
  m["foo"] = "bar";
  EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", ""));
  EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo"));
  EXPECT_TRUE(m.count("foo") > 0);
  EXPECT_EQ(m["foo"], "bar");
}

// LookupOrInsert inserts on first use and returns the existing value after.
TEST(MapUtil, LookupOrInsert) {
  typedef std::map<string, string> Map;
  Map m;
  EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz"));
  EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc"));
}

// InsertIfNotPresent reports whether the insertion actually happened.
TEST(MapUtil, InsertIfNotPresent) {
  typedef std::set<int> Set;
  Set s;
  EXPECT_TRUE(gtl::InsertIfNotPresent(&s, 0));
  EXPECT_EQ(s.count(0), 1);
  EXPECT_FALSE(gtl::InsertIfNotPresent(&s, 0));
  EXPECT_EQ(s.count(0), 1);
}
} | template <class Collection>
typename Collection::value_type::second_type* FindOrNull(
Collection& collection,
const typename Collection::value_type::first_type& key) {
typename Collection::iterator it = collection.find(key);
if (it == collection.end()) {
return nullptr;
}
return &it->second;
} | #include "tsl/lib/gtl/map_util.h"
#include <map>
#include <set>
#include <string>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TEST(MapUtil, Find) {
typedef std::map<string, string> Map;
Map m;
EXPECT_EQ("", gtl::FindWithDefault(m, "foo", ""));
m["foo"] = "bar";
EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", ""));
EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo"));
EXPECT_TRUE(m.count("foo") > 0);
EXPECT_EQ(m["foo"], "bar");
} |
#include "base/ast_internal/ast_impl.h"
#include <cstdint>
#include "absl/container/flat_hash_map.h"
namespace cel::ast_internal {
namespace {
// Shared dyn Type returned for expressions with no checker-recorded type.
// Heap-allocated and never destroyed, so the reference stays valid for the
// life of the process.
const Type& DynSingleton() {
  static auto* singleton = new Type(TypeKind(DynamicType()));
  return *singleton;
}
}
// Returns the checked type recorded for `expr_id`, falling back to the
// shared dyn type when the checker recorded nothing for that id.
const Type& AstImpl::GetType(int64_t expr_id) const {
  if (auto iter = type_map_.find(expr_id); iter != type_map_.end()) {
    return iter->second;
  }
  return DynSingleton();
}
// The AST's overall type is the type recorded for the root expression.
const Type& AstImpl::GetReturnType() const { return GetType(root_expr().id()); }
// Returns the resolved reference recorded for `expr_id`, or nullptr when
// no reference exists for that id.
const Reference* AstImpl::GetReference(int64_t expr_id) const {
  const auto entry = reference_map_.find(expr_id);
  return entry == reference_map_.end() ? nullptr : &entry->second;
}
} | #include "base/ast_internal/ast_impl.h"
#include <utility>
#include "base/ast.h"
#include "base/ast_internal/expr.h"
#include "internal/testing.h"
namespace cel::ast_internal {
namespace {
using testing::Pointee;
using testing::Truly;
// Constructing an AstImpl from a ParsedExpr yields an UNchecked AST:
// every id types as dyn, references are absent, and the expression tree
// plus source positions survive the move.
TEST(AstImpl, ParsedExprCtor) {
ParsedExpr parsed_expr;
// Build `2 + 1 == 3` with explicit node ids 1..5 (root call id 5).
auto& call = parsed_expr.mutable_expr().mutable_call_expr();
parsed_expr.mutable_expr().set_id(5);
call.set_function("_==_");
auto& eq_lhs = call.mutable_args().emplace_back();
eq_lhs.mutable_call_expr().set_function("_+_");
eq_lhs.set_id(3);
auto& sum_lhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_lhs.mutable_const_expr().set_int_value(2);
sum_lhs.set_id(1);
auto& sum_rhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_rhs.mutable_const_expr().set_int_value(1);
sum_rhs.set_id(2);
auto& eq_rhs = call.mutable_args().emplace_back();
eq_rhs.mutable_const_expr().set_int_value(3);
eq_rhs.set_id(4);
parsed_expr.mutable_source_info().mutable_positions()[5] = 6;
AstImpl ast_impl(std::move(parsed_expr));
Ast& ast = ast_impl;
// Parsed-only input: not checked, so types default to dyn and no refs.
ASSERT_FALSE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReturnType(), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReference(1), nullptr);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 5);
EXPECT_EQ(ast_impl.source_info().positions().at(5), 6);
}
// Same expectations as ParsedExprCtor, but via the (Expr, SourceInfo)
// constructor overload: still an unchecked AST.
TEST(AstImpl, RawExprCtor) {
Expr expr;
// Build `2 + 1 == 3` with explicit node ids 1..5 (root call id 5).
auto& call = expr.mutable_call_expr();
expr.set_id(5);
call.set_function("_==_");
auto& eq_lhs = call.mutable_args().emplace_back();
eq_lhs.mutable_call_expr().set_function("_+_");
eq_lhs.set_id(3);
auto& sum_lhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_lhs.mutable_const_expr().set_int_value(2);
sum_lhs.set_id(1);
auto& sum_rhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_rhs.mutable_const_expr().set_int_value(1);
sum_rhs.set_id(2);
auto& eq_rhs = call.mutable_args().emplace_back();
eq_rhs.mutable_const_expr().set_int_value(3);
eq_rhs.set_id(4);
SourceInfo source_info;
source_info.mutable_positions()[5] = 6;
AstImpl ast_impl(std::move(expr), std::move(source_info));
Ast& ast = ast_impl;
// Raw expr input: not checked, so types default to dyn and no refs.
ASSERT_FALSE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReturnType(), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReference(1), nullptr);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 5);
EXPECT_EQ(ast_impl.source_info().positions().at(5), 6);
}
// Constructing from a CheckedExpr yields a checked AST: the recorded type
// and reference maps are consulted instead of the dyn fallback.
TEST(AstImpl, CheckedExprCtor) {
CheckedExpr expr;
expr.mutable_expr().mutable_ident_expr().set_name("int_value");
expr.mutable_expr().set_id(1);
Reference ref;
ref.set_name("com.int_value");
expr.mutable_reference_map()[1] = Reference(ref);
expr.mutable_type_map()[1] = Type(PrimitiveType::kInt64);
expr.mutable_source_info().set_syntax_version("1.0");
AstImpl ast_impl(std::move(expr));
Ast& ast = ast_impl;
ASSERT_TRUE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
// GetReference resolves id 1 to the stored reference (matched by name).
EXPECT_THAT(ast_impl.GetReference(1),
Pointee(Truly([&ref](const Reference& arg) {
return arg.name() == ref.name();
})));
EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kInt64));
EXPECT_TRUE(ast_impl.root_expr().has_ident_expr());
EXPECT_EQ(ast_impl.root_expr().ident_expr().name(), "int_value");
EXPECT_EQ(ast_impl.root_expr().id(), 1);
EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
}
// A larger CheckedExpr (`int_value == 2`) with per-node type entries:
// verifies the maps are moved in intact, including nested argument ids.
TEST(AstImpl, CheckedExprDeepCopy) {
CheckedExpr expr;
auto& root = expr.mutable_expr();
root.set_id(3);
root.mutable_call_expr().set_function("_==_");
root.mutable_call_expr().mutable_args().resize(2);
auto& lhs = root.mutable_call_expr().mutable_args()[0];
auto& rhs = root.mutable_call_expr().mutable_args()[1];
expr.mutable_type_map()[3] = Type(PrimitiveType::kBool);
lhs.mutable_ident_expr().set_name("int_value");
lhs.set_id(1);
Reference ref;
ref.set_name("com.int_value");
expr.mutable_reference_map()[1] = std::move(ref);
expr.mutable_type_map()[1] = Type(PrimitiveType::kInt64);
rhs.mutable_const_expr().set_int_value(2);
rhs.set_id(2);
expr.mutable_type_map()[2] = Type(PrimitiveType::kInt64);
expr.mutable_source_info().set_syntax_version("1.0");
AstImpl ast_impl(std::move(expr));
Ast& ast = ast_impl;
ASSERT_TRUE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
EXPECT_THAT(ast_impl.GetReference(1), Pointee(Truly([](const Reference& arg) {
return arg.name() == "com.int_value";
})));
// Root (id 3) types as bool; the overall return type follows the root.
EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kBool));
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 3);
EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
}
}
// NOTE(review): the span below repeats AstImpl::GetReference and the four
// AstImpl tests that already appear earlier in this file; it looks like a
// second code/test column from a dataset join (note the literal " | "
// separators). Kept verbatim pending confirmation that it can be removed.
} | const Reference* AstImpl::GetReference(int64_t expr_id) const {
auto iter = reference_map_.find(expr_id);
if (iter == reference_map_.end()) {
return nullptr;
}
return &iter->second;
} | TEST(AstImpl, ParsedExprCtor) {
ParsedExpr parsed_expr;
auto& call = parsed_expr.mutable_expr().mutable_call_expr();
parsed_expr.mutable_expr().set_id(5);
call.set_function("_==_");
auto& eq_lhs = call.mutable_args().emplace_back();
eq_lhs.mutable_call_expr().set_function("_+_");
eq_lhs.set_id(3);
auto& sum_lhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_lhs.mutable_const_expr().set_int_value(2);
sum_lhs.set_id(1);
auto& sum_rhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_rhs.mutable_const_expr().set_int_value(1);
sum_rhs.set_id(2);
auto& eq_rhs = call.mutable_args().emplace_back();
eq_rhs.mutable_const_expr().set_int_value(3);
eq_rhs.set_id(4);
parsed_expr.mutable_source_info().mutable_positions()[5] = 6;
AstImpl ast_impl(std::move(parsed_expr));
Ast& ast = ast_impl;
ASSERT_FALSE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReturnType(), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReference(1), nullptr);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 5);
EXPECT_EQ(ast_impl.source_info().positions().at(5), 6);
}
TEST(AstImpl, RawExprCtor) {
Expr expr;
auto& call = expr.mutable_call_expr();
expr.set_id(5);
call.set_function("_==_");
auto& eq_lhs = call.mutable_args().emplace_back();
eq_lhs.mutable_call_expr().set_function("_+_");
eq_lhs.set_id(3);
auto& sum_lhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_lhs.mutable_const_expr().set_int_value(2);
sum_lhs.set_id(1);
auto& sum_rhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
sum_rhs.mutable_const_expr().set_int_value(1);
sum_rhs.set_id(2);
auto& eq_rhs = call.mutable_args().emplace_back();
eq_rhs.mutable_const_expr().set_int_value(3);
eq_rhs.set_id(4);
SourceInfo source_info;
source_info.mutable_positions()[5] = 6;
AstImpl ast_impl(std::move(expr), std::move(source_info));
Ast& ast = ast_impl;
ASSERT_FALSE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReturnType(), Type(DynamicType()));
EXPECT_EQ(ast_impl.GetReference(1), nullptr);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 5);
EXPECT_EQ(ast_impl.source_info().positions().at(5), 6);
}
TEST(AstImpl, CheckedExprCtor) {
CheckedExpr expr;
expr.mutable_expr().mutable_ident_expr().set_name("int_value");
expr.mutable_expr().set_id(1);
Reference ref;
ref.set_name("com.int_value");
expr.mutable_reference_map()[1] = Reference(ref);
expr.mutable_type_map()[1] = Type(PrimitiveType::kInt64);
expr.mutable_source_info().set_syntax_version("1.0");
AstImpl ast_impl(std::move(expr));
Ast& ast = ast_impl;
ASSERT_TRUE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
EXPECT_THAT(ast_impl.GetReference(1),
Pointee(Truly([&ref](const Reference& arg) {
return arg.name() == ref.name();
})));
EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kInt64));
EXPECT_TRUE(ast_impl.root_expr().has_ident_expr());
EXPECT_EQ(ast_impl.root_expr().ident_expr().name(), "int_value");
EXPECT_EQ(ast_impl.root_expr().id(), 1);
EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
}
TEST(AstImpl, CheckedExprDeepCopy) {
CheckedExpr expr;
auto& root = expr.mutable_expr();
root.set_id(3);
root.mutable_call_expr().set_function("_==_");
root.mutable_call_expr().mutable_args().resize(2);
auto& lhs = root.mutable_call_expr().mutable_args()[0];
auto& rhs = root.mutable_call_expr().mutable_args()[1];
expr.mutable_type_map()[3] = Type(PrimitiveType::kBool);
lhs.mutable_ident_expr().set_name("int_value");
lhs.set_id(1);
Reference ref;
ref.set_name("com.int_value");
expr.mutable_reference_map()[1] = std::move(ref);
expr.mutable_type_map()[1] = Type(PrimitiveType::kInt64);
rhs.mutable_const_expr().set_int_value(2);
rhs.set_id(2);
expr.mutable_type_map()[2] = Type(PrimitiveType::kInt64);
expr.mutable_source_info().set_syntax_version("1.0");
AstImpl ast_impl(std::move(expr));
Ast& ast = ast_impl;
ASSERT_TRUE(ast.IsChecked());
EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
EXPECT_THAT(ast_impl.GetReference(1), Pointee(Truly([](const Reference& arg) {
return arg.name() == "com.int_value";
})));
EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kBool));
EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
EXPECT_EQ(ast_impl.root_expr().id(), 3);
EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
} |
#include "extensions/protobuf/internal/wrappers.h"
#include <cstdint>
#include <string>
#include "google/protobuf/wrappers.pb.h"
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "extensions/protobuf/internal/wrappers_lite.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::extensions::protobuf_internal {
namespace {
// Signature of a google::protobuf::Reflection getter used to read a wrapper
// message's scalar `value` field via reflection.
template <typename P>
using FieldGetterRef =
absl::FunctionRef<P(const google::protobuf::Reflection&, const google::protobuf::Message&,
const google::protobuf::FieldDescriptor*)>;
// Unwraps a generated wrapper message T into its primitive value P.
template <typename T, typename P>
using GeneratedUnwrapperRef = absl::FunctionRef<absl::StatusOr<P>(const T&)>;
// Reflection setter counterpart of FieldGetterRef.
template <typename P>
using FieldSetterRef =
absl::FunctionRef<void(const google::protobuf::Reflection&, google::protobuf::Message*,
const google::protobuf::FieldDescriptor*, const P&)>;
// Writes a primitive value P into a generated wrapper message T.
template <typename T, typename P>
using GeneratedWrapperRef = absl::FunctionRef<absl::Status(const P&, T&)>;
// Extracts the scalar `value` field from a well-known wrapper message.
//
// Fast path: when `message` is the generated type T itself, delegate
// straight to `unwrapper`. Slow path: for dynamically generated messages,
// locate the `value` field by number via reflection, validate its C++ type
// and cardinality, and read it with `getter`.
template <typename T, typename P>
absl::StatusOr<P> UnwrapValueProto(const google::protobuf::Message& message,
                                   google::protobuf::FieldDescriptor::CppType cpp_type,
                                   GeneratedUnwrapperRef<T, P> unwrapper,
                                   FieldGetterRef<P> getter) {
  const auto* descriptor = message.GetDescriptor();
  if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing descriptor"));
  }
  if (ABSL_PREDICT_TRUE(descriptor == T::descriptor())) {
    // Exact generated type: no reflection needed.
    return unwrapper(google::protobuf::DownCastToGenerated<T>(message));
  }
  const auto* reflection = message.GetReflection();
  if (ABSL_PREDICT_FALSE(reflection == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing reflection"));
  }
  const auto* value_field = descriptor->FindFieldByNumber(T::kValueFieldNumber);
  if (ABSL_PREDICT_FALSE(value_field == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing value field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(value_field->cpp_type() != cpp_type)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(),
        " has unexpected value field type: ", value_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(value_field->is_map() || value_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(),
                     " has unexpected value field cardinality: REPEATED"));
  }
  return getter(*reflection, message, value_field);
}
// Stores a primitive value into a well-known wrapper message.
//
// Mirrors UnwrapValueProto: a fast path for the exact generated type T, and
// a reflection-based slow path (with identical validation) for dynamically
// generated messages, writing through `setter`.
template <typename T, typename P>
absl::Status WrapValueProto(google::protobuf::Message& message, const P& value,
                            google::protobuf::FieldDescriptor::CppType cpp_type,
                            GeneratedWrapperRef<T, P> wrapper,
                            FieldSetterRef<P> setter) {
  const auto* descriptor = message.GetDescriptor();
  if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing descriptor"));
  }
  if (ABSL_PREDICT_TRUE(descriptor == T::descriptor())) {
    // Exact generated type: write through the typed wrapper, no reflection.
    return wrapper(value, google::protobuf::DownCastToGenerated<T>(message));
  }
  const auto* reflection = message.GetReflection();
  if (ABSL_PREDICT_FALSE(reflection == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing reflection"));
  }
  const auto* value_field = descriptor->FindFieldByNumber(T::kValueFieldNumber);
  if (ABSL_PREDICT_FALSE(value_field == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing value field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(value_field->cpp_type() != cpp_type)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(),
        " has unexpected value field type: ", value_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(value_field->is_map() || value_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(),
                     " has unexpected value field cardinality: REPEATED"));
  }
  setter(*reflection, &message, value_field, value);
  return absl::OkStatus();
}
}
// Reads the bool payload of a google.protobuf.BoolValue (generated or dynamic).
absl::StatusOr<bool> UnwrapDynamicBoolValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.BoolValue");
return UnwrapValueProto<google::protobuf::BoolValue, bool>(
message, google::protobuf::FieldDescriptor::CPPTYPE_BOOL,
UnwrapGeneratedBoolValueProto, &google::protobuf::Reflection::GetBool);
}
// Reads the bytes payload of a google.protobuf.BytesValue as an absl::Cord.
absl::StatusOr<absl::Cord> UnwrapDynamicBytesValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.BytesValue");
return UnwrapValueProto<google::protobuf::BytesValue, absl::Cord>(
message, google::protobuf::FieldDescriptor::CPPTYPE_STRING,
UnwrapGeneratedBytesValueProto, &google::protobuf::Reflection::GetCord);
}
// Reads a google.protobuf.FloatValue, widening the float payload to double.
absl::StatusOr<double> UnwrapDynamicFloatValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.FloatValue");
return UnwrapValueProto<google::protobuf::FloatValue, double>(
message, google::protobuf::FieldDescriptor::CPPTYPE_FLOAT,
UnwrapGeneratedFloatValueProto, &google::protobuf::Reflection::GetFloat);
}
// Reads the double payload of a google.protobuf.DoubleValue.
absl::StatusOr<double> UnwrapDynamicDoubleValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.DoubleValue");
return UnwrapValueProto<google::protobuf::DoubleValue, double>(
message, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE,
UnwrapGeneratedDoubleValueProto, &google::protobuf::Reflection::GetDouble);
}
// Reads a google.protobuf.Int32Value, widening the payload to int64_t.
absl::StatusOr<int64_t> UnwrapDynamicInt32ValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Int32Value");
return UnwrapValueProto<google::protobuf::Int32Value, int64_t>(
message, google::protobuf::FieldDescriptor::CPPTYPE_INT32,
UnwrapGeneratedInt32ValueProto, &google::protobuf::Reflection::GetInt32);
}
// Reads the int64 payload of a google.protobuf.Int64Value.
absl::StatusOr<int64_t> UnwrapDynamicInt64ValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Int64Value");
return UnwrapValueProto<google::protobuf::Int64Value, int64_t>(
message, google::protobuf::FieldDescriptor::CPPTYPE_INT64,
UnwrapGeneratedInt64ValueProto, &google::protobuf::Reflection::GetInt64);
}
// Reads the string payload of a google.protobuf.StringValue as an absl::Cord.
absl::StatusOr<absl::Cord> UnwrapDynamicStringValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.StringValue");
return UnwrapValueProto<google::protobuf::StringValue, absl::Cord>(
message, google::protobuf::FieldDescriptor::CPPTYPE_STRING,
UnwrapGeneratedStringValueProto, &google::protobuf::Reflection::GetCord);
}
// Reads a google.protobuf.UInt32Value, widening the payload to uint64_t.
absl::StatusOr<uint64_t> UnwrapDynamicUInt32ValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.UInt32Value");
return UnwrapValueProto<google::protobuf::UInt32Value, uint64_t>(
message, google::protobuf::FieldDescriptor::CPPTYPE_UINT32,
UnwrapGeneratedUInt32ValueProto, &google::protobuf::Reflection::GetUInt32);
}
// Reads the uint64 payload of a google.protobuf.UInt64Value.
absl::StatusOr<uint64_t> UnwrapDynamicUInt64ValueProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.UInt64Value");
return UnwrapValueProto<google::protobuf::UInt64Value, uint64_t>(
message, google::protobuf::FieldDescriptor::CPPTYPE_UINT64,
UnwrapGeneratedUInt64ValueProto, &google::protobuf::Reflection::GetUInt64);
}
// Stores `value` into a google.protobuf.BoolValue (generated or dynamic).
absl::Status WrapDynamicBoolValueProto(bool value, google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.BoolValue");
return WrapValueProto<google::protobuf::BoolValue, bool>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_BOOL,
WrapGeneratedBoolValueProto, &google::protobuf::Reflection::SetBool);
}
// Stores `value` into a google.protobuf.BytesValue.
// NOTE(review): this passes the Cord straight to Reflection::SetString,
// while the StringValue counterpart below casts to std::string first —
// presumably both resolve to valid overloads; confirm the Cord overload
// exists on the minimum supported protobuf version.
absl::Status WrapDynamicBytesValueProto(const absl::Cord& value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.BytesValue");
return WrapValueProto<google::protobuf::BytesValue, absl::Cord>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_STRING,
WrapGeneratedBytesValueProto,
[](const google::protobuf::Reflection& reflection, google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* field,
const absl::Cord& value) -> void {
reflection.SetString(message, field, value);
});
}
// Stores `value` into a google.protobuf.FloatValue.
absl::Status WrapDynamicFloatValueProto(float value, google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.FloatValue");
return WrapValueProto<google::protobuf::FloatValue, float>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_FLOAT,
WrapGeneratedFloatValueProto, &google::protobuf::Reflection::SetFloat);
}
// Stores `value` into a google.protobuf.DoubleValue.
absl::Status WrapDynamicDoubleValueProto(double value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.DoubleValue");
return WrapValueProto<google::protobuf::DoubleValue, double>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE,
WrapGeneratedDoubleValueProto, &google::protobuf::Reflection::SetDouble);
}
// Stores `value` into a google.protobuf.Int32Value.
absl::Status WrapDynamicInt32ValueProto(int32_t value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Int32Value");
return WrapValueProto<google::protobuf::Int32Value, int32_t>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_INT32,
WrapGeneratedInt32ValueProto, &google::protobuf::Reflection::SetInt32);
}
// Stores `value` into a google.protobuf.Int64Value.
absl::Status WrapDynamicInt64ValueProto(int64_t value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Int64Value");
return WrapValueProto<google::protobuf::Int64Value, int64_t>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_INT64,
WrapGeneratedInt64ValueProto, &google::protobuf::Reflection::SetInt64);
}
// Stores `value` into a google.protobuf.UInt32Value.
absl::Status WrapDynamicUInt32ValueProto(uint32_t value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.UInt32Value");
return WrapValueProto<google::protobuf::UInt32Value, uint32_t>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_UINT32,
WrapGeneratedUInt32ValueProto, &google::protobuf::Reflection::SetUInt32);
}
// Stores `value` into a google.protobuf.UInt64Value.
absl::Status WrapDynamicUInt64ValueProto(uint64_t value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.UInt64Value");
return WrapValueProto<google::protobuf::UInt64Value, uint64_t>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_UINT64,
WrapGeneratedUInt64ValueProto, &google::protobuf::Reflection::SetUInt64);
}
// Stores `value` into a google.protobuf.StringValue. The reflection path
// flattens the Cord to std::string before writing.
absl::Status WrapDynamicStringValueProto(const absl::Cord& value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.StringValue");
return WrapValueProto<google::protobuf::StringValue, absl::Cord>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_STRING,
WrapGeneratedStringValueProto,
[](const google::protobuf::Reflection& reflection, google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* field,
const absl::Cord& value) -> void {
reflection.SetString(message, field, static_cast<std::string>(value));
});
}
} | #include "extensions/protobuf/internal/wrappers.h"
#include <limits>
#include <memory>
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "extensions/protobuf/internal/wrappers_lite.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
namespace cel::extensions::protobuf_internal {
namespace {
using testing::Eq;
using cel::internal::IsOkAndHolds;
using cel::internal::StatusIs;
// BoolValue round-trips: generated fast path and reflection (dynamic) path.
// Generated path: a default BoolValue unwraps to false.
TEST(BoolWrapper, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedBoolValueProto(google::protobuf::BoolValue()),
IsOkAndHolds(Eq(false)));
}
// Dynamic path: unwrap a prototype built from a standalone descriptor pool,
// forcing the reflection branch (no delegation to the generated factory).
TEST(BoolWrapper, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::BoolValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicBoolValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.BoolValue"))),
IsOkAndHolds(Eq(false)));
}
// Generated path: wrapping writes the value field.
TEST(BoolWrapper, GeneratedToProto) {
google::protobuf::BoolValue proto;
ASSERT_OK(WrapGeneratedBoolValueProto(true, proto));
EXPECT_TRUE(proto.value());
}
// Dynamic path: wrapping via reflection writes the dynamic message's field.
TEST(BoolWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::BoolValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory
.GetPrototype(pool.FindMessageTypeByName("google.protobuf.BoolValue"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicBoolValueProto(true, *proto));
EXPECT_TRUE(reflection->GetBool(*proto, value_field));
}
// BytesValue round-trips through both the generated and the dynamic path.
TEST(BytesWrapper, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedBytesValueProto(google::protobuf::BytesValue()),
IsOkAndHolds(Eq(absl::Cord())));
}
// Dynamic path: descriptor-database-backed prototype, reflection branch.
TEST(BytesWrapper, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::BytesValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicBytesValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.BytesValue"))),
IsOkAndHolds(Eq(absl::Cord())));
}
TEST(BytesWrapper, GeneratedToProto) {
google::protobuf::BytesValue proto;
ASSERT_OK(WrapGeneratedBytesValueProto(absl::Cord("foo"), proto));
EXPECT_EQ(proto.value(), "foo");
}
// Dynamic path: the Cord payload lands in the dynamic message's value field.
TEST(BytesWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::BytesValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.BytesValue"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicBytesValueProto(absl::Cord("foo"), *proto));
EXPECT_EQ(reflection->GetString(*proto, value_field), "foo");
}
// FloatValue/DoubleValue round-trips, generated and dynamic paths.
TEST(DoubleWrapper, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedFloatValueProto(google::protobuf::FloatValue()),
IsOkAndHolds(Eq(0.0f)));
EXPECT_THAT(UnwrapGeneratedDoubleValueProto(google::protobuf::DoubleValue()),
IsOkAndHolds(Eq(0.0)));
}
// Dynamic path: both wrapper types live in the same descriptor file.
TEST(DoubleWrapper, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::DoubleValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicFloatValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.FloatValue"))),
IsOkAndHolds(Eq(0.0f)));
EXPECT_THAT(UnwrapDynamicDoubleValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.DoubleValue"))),
IsOkAndHolds(Eq(0.0)));
}
TEST(DoubleWrapper, GeneratedToProto) {
{
google::protobuf::FloatValue proto;
ASSERT_OK(WrapGeneratedFloatValueProto(1.0f, proto));
EXPECT_EQ(proto.value(), 1.0f);
}
{
google::protobuf::DoubleValue proto;
ASSERT_OK(WrapGeneratedDoubleValueProto(1.0, proto));
EXPECT_EQ(proto.value(), 1.0);
}
}
// Dynamic path: write via reflection into FloatValue then DoubleValue.
TEST(DoubleWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::DoubleValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.FloatValue"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicFloatValueProto(1.0f, *proto));
EXPECT_EQ(reflection->GetFloat(*proto, value_field), 1.0f);
}
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.DoubleValue"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicDoubleValueProto(1.0, *proto));
EXPECT_EQ(reflection->GetDouble(*proto, value_field), 1.0);
}
}
// Int32Value/Int64Value round-trips, generated and dynamic paths.
TEST(IntWrapper, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedInt32ValueProto(google::protobuf::Int32Value()),
IsOkAndHolds(Eq(0)));
EXPECT_THAT(UnwrapGeneratedInt64ValueProto(google::protobuf::Int64Value()),
IsOkAndHolds(Eq(0)));
}
// Dynamic path: both wrapper types live in the same descriptor file.
TEST(IntWrapper, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Int64Value::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicInt32ValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.Int32Value"))),
IsOkAndHolds(Eq(0)));
EXPECT_THAT(UnwrapDynamicInt64ValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.Int64Value"))),
IsOkAndHolds(Eq(0)));
}
TEST(IntWrapper, GeneratedToProto) {
{
google::protobuf::Int32Value proto;
ASSERT_OK(WrapGeneratedInt32ValueProto(1, proto));
EXPECT_EQ(proto.value(), 1);
}
{
google::protobuf::Int64Value proto;
ASSERT_OK(WrapGeneratedInt64ValueProto(1, proto));
EXPECT_EQ(proto.value(), 1);
}
}
// Dynamic path: write via reflection into Int32Value then Int64Value.
TEST(IntWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Int64Value::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.Int32Value"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicInt32ValueProto(1, *proto));
EXPECT_EQ(reflection->GetInt32(*proto, value_field), 1);
}
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.Int64Value"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicInt64ValueProto(1, *proto));
EXPECT_EQ(reflection->GetInt64(*proto, value_field), 1);
}
}
// StringValue round-trips, generated and dynamic paths.
TEST(StringWrapper, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedStringValueProto(google::protobuf::StringValue()),
IsOkAndHolds(absl::Cord()));
}
// Dynamic path: descriptor-database-backed prototype, reflection branch.
TEST(StringWrapper, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::StringValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicStringValueProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.StringValue"))),
IsOkAndHolds(absl::Cord()));
}
TEST(StringWrapper, GeneratedToProto) {
google::protobuf::StringValue proto;
ASSERT_OK(WrapGeneratedStringValueProto(absl::Cord("foo"), proto));
EXPECT_EQ(proto.value(), "foo");
}
// Dynamic path: the Cord payload lands in the dynamic message's value field.
TEST(StringWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::StringValue::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.StringValue"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicStringValueProto(absl::Cord("foo"), *proto));
EXPECT_EQ(reflection->GetString(*proto, value_field), "foo");
}
// Default-constructed generated UInt32Value/UInt64Value unwrap to zero.
TEST(UintWrapper, GeneratedFromProto) {
  EXPECT_THAT(UnwrapGeneratedUInt32ValueProto(google::protobuf::UInt32Value()),
              IsOkAndHolds(Eq(0u)));
  EXPECT_THAT(UnwrapGeneratedUInt64ValueProto(google::protobuf::UInt64Value()),
              IsOkAndHolds(Eq(0u)));
}
// Dynamically built UInt32Value/UInt64Value prototypes unwrap to zero via the
// reflection-based (non-generated) code path.
TEST(UintWrapper, CustomFromProto) {
  google::protobuf::SimpleDescriptorDatabase database;
  {
    // wrappers.proto defines both UInt32Value and UInt64Value.
    google::protobuf::FileDescriptorProto fd;
    google::protobuf::UInt64Value::descriptor()->file()->CopyTo(&fd);
    ASSERT_TRUE(database.Add(fd));
  }
  google::protobuf::DescriptorPool pool(&database);
  pool.AllowUnknownDependencies();
  google::protobuf::DynamicMessageFactory factory(&pool);
  factory.SetDelegateToGeneratedFactory(false);
  EXPECT_THAT(UnwrapDynamicUInt32ValueProto(*factory.GetPrototype(
                  pool.FindMessageTypeByName("google.protobuf.UInt32Value"))),
              IsOkAndHolds(Eq(0u)));
  EXPECT_THAT(UnwrapDynamicUInt64ValueProto(*factory.GetPrototype(
                  pool.FindMessageTypeByName("google.protobuf.UInt64Value"))),
              IsOkAndHolds(Eq(0u)));
}
// Wrapping into generated UInt32Value/UInt64Value sets their `value` fields.
TEST(UintWrapper, GeneratedToProto) {
  {
    google::protobuf::UInt32Value proto;
    ASSERT_OK(WrapGeneratedUInt32ValueProto(1, proto));
    EXPECT_EQ(proto.value(), 1);
  }
  {
    google::protobuf::UInt64Value proto;
    ASSERT_OK(WrapGeneratedUInt64ValueProto(1, proto));
    EXPECT_EQ(proto.value(), 1);
  }
}
// Wrapping into dynamically built UInt32Value/UInt64Value messages sets their
// `value` fields, verified via reflection.
TEST(UintWrapper, CustomToProto) {
  google::protobuf::SimpleDescriptorDatabase database;
  {
    // wrappers.proto defines both UInt32Value and UInt64Value.
    google::protobuf::FileDescriptorProto fd;
    google::protobuf::UInt64Value::descriptor()->file()->CopyTo(&fd);
    ASSERT_TRUE(database.Add(fd));
  }
  google::protobuf::DescriptorPool pool(&database);
  pool.AllowUnknownDependencies();
  google::protobuf::DynamicMessageFactory factory(&pool);
  factory.SetDelegateToGeneratedFactory(false);
  {
    std::unique_ptr<google::protobuf::Message> proto =
        absl::WrapUnique(factory
                             .GetPrototype(pool.FindMessageTypeByName(
                                 "google.protobuf.UInt32Value"))
                             ->New());
    const auto* descriptor = proto->GetDescriptor();
    const auto* reflection = proto->GetReflection();
    const auto* value_field = descriptor->FindFieldByName("value");
    ASSERT_NE(value_field, nullptr);
    ASSERT_OK(WrapDynamicUInt32ValueProto(1, *proto));
    EXPECT_EQ(reflection->GetUInt32(*proto, value_field), 1);
  }
  {
    std::unique_ptr<google::protobuf::Message> proto =
        absl::WrapUnique(factory
                             .GetPrototype(pool.FindMessageTypeByName(
                                 "google.protobuf.UInt64Value"))
                             ->New());
    const auto* descriptor = proto->GetDescriptor();
    const auto* reflection = proto->GetReflection();
    const auto* value_field = descriptor->FindFieldByName("value");
    ASSERT_NE(value_field, nullptr);
    ASSERT_OK(WrapDynamicUInt64ValueProto(1, *proto));
    EXPECT_EQ(reflection->GetUInt64(*proto, value_field), 1);
  }
}
}
} | absl::Status WrapDynamicInt64ValueProto(int64_t value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Int64Value");
return WrapValueProto<google::protobuf::Int64Value, int64_t>(
message, value, google::protobuf::FieldDescriptor::CPPTYPE_INT64,
WrapGeneratedInt64ValueProto, &google::protobuf::Reflection::SetInt64);
} | TEST(IntWrapper, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Int64Value::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.Int32Value"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicInt32ValueProto(1, *proto));
EXPECT_EQ(reflection->GetInt32(*proto, value_field), 1);
}
{
std::unique_ptr<google::protobuf::Message> proto =
absl::WrapUnique(factory
.GetPrototype(pool.FindMessageTypeByName(
"google.protobuf.Int64Value"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* value_field = descriptor->FindFieldByName("value");
ASSERT_NE(value_field, nullptr);
ASSERT_OK(WrapDynamicInt64ValueProto(1, *proto));
EXPECT_EQ(reflection->GetInt64(*proto, value_field), 1);
}
} |
#include "tensorflow/core/util/events_writer.h"
#include <stddef.h>
#include <memory>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
// Does not open any file; file creation is deferred to InitIfNeeded(), which
// is invoked lazily from Init(), FileName() and the first write.
EventsWriter::EventsWriter(const string& file_prefix)
    : env_(Env::Default()),
      file_prefix_(file_prefix),
      num_outstanding_events_(0) {}
EventsWriter::~EventsWriter() {
  Close().IgnoreError();  // Flush and close on destruction; errors dropped.
}
// Equivalent to InitWithSuffix("").
Status EventsWriter::Init() { return InitWithSuffix(""); }
// Records the filename suffix, then eagerly opens the events file.
Status EventsWriter::InitWithSuffix(const string& suffix) {
  file_suffix_ = suffix;
  return InitIfNeeded();
}
// Lazily (re)opens the events file. A no-op when a writer is already open and
// its file still exists on disk; otherwise creates a fresh file whose name
// embeds the current wall time and hostname, and writes + flushes a header
// event (file version and writer source metadata) before returning.
Status EventsWriter::InitIfNeeded() {
  if (recordio_writer_ != nullptr) {
    CHECK(!filename_.empty());
    if (!FileStillExists().ok()) {
      // The previous file vanished (e.g. deleted externally). Buffered events
      // for it cannot be recovered, so warn and fall through to reopen.
      if (num_outstanding_events_ > 0) {
        LOG(WARNING) << "Re-initialization, attempting to open a new file, "
                     << num_outstanding_events_ << " events will be lost.";
      }
    } else {
      return absl::OkStatus();
    }
  }
  int64_t time_in_seconds = env_->NowMicros() / 1000000;
  filename_ =
      strings::Printf("%s.out.tfevents.%010lld.%s%s", file_prefix_.c_str(),
                      static_cast<long long>(time_in_seconds),
                      port::Hostname().c_str(), file_suffix_.c_str());
  // Release the old writer before replacing the file it wraps.
  recordio_writer_.reset();
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      env_->NewWritableFile(filename_, &recordio_file_),
      "Creating writable file ", filename_);
  recordio_writer_ = std::make_unique<io::RecordWriter>(recordio_file_.get());
  // NOTE(review): std::make_unique never returns null, so this check is dead
  // code kept for defensiveness.
  if (recordio_writer_ == nullptr) {
    return errors::Unknown("Could not create record writer");
  }
  num_outstanding_events_ = 0;
  VLOG(1) << "Successfully opened events file: " << filename_;
  {
    // First record in every events file: version string + source metadata.
    Event event;
    event.set_wall_time(time_in_seconds);
    event.set_file_version(strings::StrCat(kVersionPrefix, kCurrentVersion));
    SourceMetadata* source_metadata = event.mutable_source_metadata();
    source_metadata->set_writer(kWriterSourceMetadata);
    WriteEvent(event);
    TF_RETURN_WITH_CONTEXT_IF_ERROR(Flush(), "Flushing first event.");
  }
  return absl::OkStatus();
}
// Returns the current events file name, lazily opening the file first if no
// name has been assigned yet. Initialization errors are ignored, in which
// case the returned name may be empty.
string EventsWriter::FileName() {
  const bool not_yet_initialized = filename_.empty();
  if (not_yet_initialized) {
    InitIfNeeded().IgnoreError();
  }
  return filename_;
}
// Appends one already-serialized Event record to the file, opening the file
// on first use. Write errors are ignored; failure to open is logged and the
// record is dropped.
void EventsWriter::WriteSerializedEvent(StringPiece event_str) {
  // Lazy open: bail out (dropping the record) if the file cannot be created.
  if (recordio_writer_ == nullptr && !InitIfNeeded().ok()) {
    LOG(ERROR) << "Write failed because file could not be opened.";
    return;
  }
  ++num_outstanding_events_;
  recordio_writer_->WriteRecord(event_str).IgnoreError();
}
void EventsWriter::WriteEvent(const Event& event) {
string record;
event.AppendToString(&record);
WriteSerializedEvent(record);
}
// Forces all buffered events to durable storage: flushes the record writer's
// buffer, then syncs the underlying file. A no-op when nothing has been
// written since the last flush.
Status EventsWriter::Flush() {
  if (num_outstanding_events_ == 0) return absl::OkStatus();
  CHECK(recordio_file_ != nullptr) << "Unexpected NULL file";
  TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_writer_->Flush(), "Failed to flush ",
                                  num_outstanding_events_, " events to ",
                                  filename_);
  TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_file_->Sync(), "Failed to sync ",
                                  num_outstanding_events_, " events to ",
                                  filename_);
  VLOG(1) << "Wrote " << num_outstanding_events_ << " events to disk.";
  num_outstanding_events_ = 0;
  return absl::OkStatus();
}
// Flushes outstanding events and closes the file, releasing both the record
// writer and the file handle. If both the flush and the close fail, the close
// error takes precedence in the returned status.
Status EventsWriter::Close() {
  Status status = Flush();
  if (recordio_file_ != nullptr) {
    Status close_status = recordio_file_->Close();
    if (!close_status.ok()) {
      status = close_status;
    }
    // Writer must be destroyed before the file it wraps.
    recordio_writer_.reset(nullptr);
    recordio_file_.reset(nullptr);
  }
  num_outstanding_events_ = 0;
  return status;
}
// Returns OK iff the current events file is still present on disk.
Status EventsWriter::FileStillExists() {
  if (!env_->FileExists(filename_).ok()) {
    return errors::Unknown("The events file ", filename_, " has disappeared.");
  }
  return absl::OkStatus();
}
} | #include "tensorflow/core/util/events_writer.h"
#include <math.h>
#include <memory>
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
// Shared default Env used by all tests below.
Env* env() { return Env::Default(); }
// Writes a single scalar-summary Event (one tag/simple_value pair) with the
// given wall time and step through `writer`.
void WriteSimpleValue(EventsWriter* writer, double wall_time, int64_t step,
                      const string& tag, float simple_value) {
  Event event;
  event.set_wall_time(wall_time);
  event.set_step(step);
  Summary::Value* summ_val = event.mutable_summary()->add_value();
  summ_val->set_tag(tag);
  summ_val->set_simple_value(simple_value);
  writer->WriteEvent(event);
}
// Writes the two fixed data events that VerifyFile() expects to read back.
void WriteFile(EventsWriter* writer) {
  WriteSimpleValue(writer, 1234, 34, "foo", 3.14159);
  WriteSimpleValue(writer, 2345, 35, "bar", -42);
}
// Reads the next record at *offset and parses it into *proto. Returns false
// on either read or parse failure; *offset is advanced by the reader.
static bool ReadEventProto(io::RecordReader* reader, uint64* offset,
                           Event* proto) {
  tstring record;
  if (!reader->ReadRecord(offset, &record).ok()) {
    return false;
  }
  return ParseProtoUnlimited(proto, record);
}
void VerifyFile(const string& filename) {
TF_CHECK_OK(env()->FileExists(filename));
std::unique_ptr<RandomAccessFile> event_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &event_file));
io::RecordReader* reader = new io::RecordReader(event_file.get());
uint64 offset = 0;
Event actual;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
double current_time = env()->NowMicros() / 1000000.0;
EXPECT_LT(fabs(actual.wall_time() - current_time), 5);
EXPECT_EQ(actual.file_version(),
strings::StrCat(EventsWriter::kVersionPrefix,
EventsWriter::kCurrentVersion));
EXPECT_EQ(actual.source_metadata().writer(),
EventsWriter::kWriterSourceMetadata);
Event expected;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 1234 step: 34 "
"summary { value { tag: 'foo' simple_value: 3.14159 } }",
&expected));
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 2345 step: 35 "
"summary { value { tag: 'bar' simple_value: -42 } }",
&expected));
TF_CHECK_OK(env()->DeleteFile(filename));
delete reader;
}
// Builds a per-test file prefix under the test temp directory.
string GetDirName(const string& suffix) {
  return io::JoinPath(testing::TmpDir(), suffix);
}
// Events written before an explicit Flush()/Close() are readable afterwards.
TEST(EventWriter, WriteFlush) {
  string file_prefix = GetDirName("/writeflush_test");
  EventsWriter writer(file_prefix);
  WriteFile(&writer);
  TF_EXPECT_OK(writer.Flush());
  string filename = writer.FileName();
  VerifyFile(filename);
}
TEST(EventWriter, WriteClose) {
  string file_prefix = GetDirName("/writeclose_test");
  EventsWriter writer(file_prefix);
  WriteFile(&writer);
  TF_EXPECT_OK(writer.Close());
  string filename = writer.FileName();
  VerifyFile(filename);
}
// Destroying the writer (which closes via the destructor) leaves a valid file.
TEST(EventWriter, WriteDelete) {
  string file_prefix = GetDirName("/writedelete_test");
  EventsWriter* writer = new EventsWriter(file_prefix);
  WriteFile(writer);
  string filename = writer->FileName();
  delete writer;
  VerifyFile(filename);
}
// Flush()/Close() still return OK even after the file has been deleted out
// from under the writer (write errors are swallowed by design).
TEST(EventWriter, FailFlush) {
  string file_prefix = GetDirName("/failflush_test");
  EventsWriter writer(file_prefix);
  string filename = writer.FileName();
  WriteFile(&writer);
  TF_EXPECT_OK(env()->FileExists(filename));
  TF_ASSERT_OK(env()->DeleteFile(filename));
  EXPECT_TRUE(writer.Flush().ok());
}
TEST(EventWriter, FailClose) {
  string file_prefix = GetDirName("/failclose_test");
  EventsWriter writer(file_prefix);
  string filename = writer.FileName();
  WriteFile(&writer);
  TF_EXPECT_OK(env()->FileExists(filename));
  TF_ASSERT_OK(env()->DeleteFile(filename));
  EXPECT_TRUE(writer.Close().ok());
}
// Explicit Init() creates the file eagerly, and its name stays stable
// through writes and Close().
TEST(EventWriter, InitWriteClose) {
  string file_prefix = GetDirName("/initwriteclose_test");
  EventsWriter writer(file_prefix);
  TF_EXPECT_OK(writer.Init());
  string filename0 = writer.FileName();
  TF_EXPECT_OK(env()->FileExists(filename0));
  WriteFile(&writer);
  TF_EXPECT_OK(writer.Close());
  string filename1 = writer.FileName();
  EXPECT_EQ(filename0, filename1);
  VerifyFile(filename1);
}
// FileName() alone is enough to create the file, with or without writes.
TEST(EventWriter, NameWriteClose) {
  string file_prefix = GetDirName("/namewriteclose_test");
  EventsWriter writer(file_prefix);
  string filename = writer.FileName();
  TF_EXPECT_OK(env()->FileExists(filename));
  WriteFile(&writer);
  TF_EXPECT_OK(writer.Close());
  VerifyFile(filename);
}
TEST(EventWriter, NameClose) {
  string file_prefix = GetDirName("/nameclose_test");
  EventsWriter writer(file_prefix);
  string filename = writer.FileName();
  TF_EXPECT_OK(writer.Close());
  TF_EXPECT_OK(env()->FileExists(filename));
  TF_ASSERT_OK(env()->DeleteFile(filename));
}
// If the file disappears before any event is written, re-init opens a new
// file with a different (later-timestamped) name.
TEST(EventWriter, FileDeletionBeforeWriting) {
  string file_prefix = GetDirName("/fdbw_test");
  EventsWriter writer(file_prefix);
  string filename0 = writer.FileName();
  TF_EXPECT_OK(env()->FileExists(filename0));
  // Sleep so the replacement file gets a distinct timestamp in its name.
  env()->SleepForMicroseconds(
      2000000);
  TF_ASSERT_OK(env()->DeleteFile(filename0));
  TF_EXPECT_OK(writer.Init());
  WriteFile(&writer);
  TF_EXPECT_OK(writer.Flush());
  string filename1 = writer.FileName();
  EXPECT_NE(filename0, filename1);
  VerifyFile(filename1);
}
}
} | string EventsWriter::FileName() {
if (filename_.empty()) {
InitIfNeeded().IgnoreError();
}
return filename_;
} | TEST(EventWriter, WriteFlush) {
string file_prefix = GetDirName("/writeflush_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, WriteClose) {
string file_prefix = GetDirName("/writeclose_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, FailFlush) {
string file_prefix = GetDirName("/failflush_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Flush().ok());
}
TEST(EventWriter, FailClose) {
string file_prefix = GetDirName("/failclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Close().ok());
}
TEST(EventWriter, InitWriteClose) {
string file_prefix = GetDirName("/initwriteclose_test");
EventsWriter writer(file_prefix);
TF_EXPECT_OK(writer.Init());
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename1 = writer.FileName();
EXPECT_EQ(filename0, filename1);
VerifyFile(filename1);
}
TEST(EventWriter, NameWriteClose) {
string file_prefix = GetDirName("/namewriteclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
VerifyFile(filename);
}
TEST(EventWriter, NameClose) {
string file_prefix = GetDirName("/nameclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(writer.Close());
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
}
TEST(EventWriter, FileDeletionBeforeWriting) {
string file_prefix = GetDirName("/fdbw_test");
EventsWriter writer(file_prefix);
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
env()->SleepForMicroseconds(
2000000);
TF_ASSERT_OK(env()->DeleteFile(filename0));
TF_EXPECT_OK(writer.Init());
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename1 = writer.FileName();
EXPECT_NE(filename0, filename1);
VerifyFile(filename1);
} |
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#ifdef TFLITE_WITH_RUY
#include "ruy/context.h"
#include "ruy/thread_pool.h"
#else
#include "public/gemmlowp.h"
#endif
namespace tflite {
namespace cpu_backend_threadpool {
#ifdef TFLITE_WITH_RUY
using Task = ruy::Task;
// Runs `tasks_count` tasks on Ruy's thread pool (possibly concurrently) and
// blocks until all of them have completed. `tasks_count` must not exceed the
// context's max_num_threads().
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  cpu_backend_context->ruy_context()->mutable_thread_pool()->Execute(
      tasks_count, tasks);
}
#else
using Task = gemmlowp::Task;
// Same contract as the Ruy variant above, but dispatching to gemmlowp's
// worker pool when TFLITE_WITH_RUY is not defined.
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  cpu_backend_context->gemmlowp_context()->workers_pool()->Execute(tasks_count,
                                                                   tasks);
}
#endif
}
}
#endif | #include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
// Task that fills buffer_[start_, end_) with its own index values; the
// threadpool test shards one buffer across several of these tasks.
class TestGenerateArrayOfIncrementingIntsTask
    : public cpu_backend_threadpool::Task {
 public:
  TestGenerateArrayOfIncrementingIntsTask(int* buffer, int start, int end)
      : buffer_(buffer), start_(start), end_(end) {}
  // Invoked on a worker thread by cpu_backend_threadpool::Execute.
  void Run() override {
    for (int i = start_; i < end_; i++) {
      buffer_[i] = i;
    }
  }
 private:
  int* buffer_;    // Not owned; shared with other tasks (disjoint ranges).
  int start_;      // Inclusive.
  int end_;        // Exclusive.
};
// Splits a size-`size` buffer into `num_threads` contiguous shards (last
// shard absorbs the remainder), fills them concurrently, and checks that
// every slot i holds the value i.
void TestGenerateArrayOfIncrementingInts(int num_threads, int size) {
  std::vector<int> buffer(size);
  std::vector<TestGenerateArrayOfIncrementingIntsTask> tasks;
  int rough_size_per_thread = size / num_threads;
  int start = 0;
  for (int thread = 0; thread < num_threads; thread++) {
    int end = start + rough_size_per_thread;
    if (thread == num_threads - 1) {
      end = size;
    }
    tasks.emplace_back(buffer.data(), start, end);
    start = end;
  }
  ASSERT_EQ(num_threads, tasks.size());
  CpuBackendContext context;
  context.SetMaxNumThreads(num_threads);
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), &context);
  for (int i = 0; i < size; i++) {
    ASSERT_EQ(buffer[i], i);
  }
}
// Exercise thread counts that divide the buffer evenly and ones that do not.
TEST(CpuBackendThreadpoolTest, OneThreadSize100) {
  TestGenerateArrayOfIncrementingInts(1, 100);
}
TEST(CpuBackendThreadpoolTest, ThreeThreadsSize1000000) {
  TestGenerateArrayOfIncrementingInts(3, 1000000);
}
TEST(CpuBackendThreadpoolTest, TenThreadsSize1234567) {
  TestGenerateArrayOfIncrementingInts(10, 1234567);
}
}
} | template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
CpuBackendContext* cpu_backend_context) {
TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
cpu_backend_context->ruy_context()->mutable_thread_pool()->Execute(
tasks_count, tasks);
}
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
CpuBackendContext* cpu_backend_context) {
TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
cpu_backend_context->gemmlowp_context()->workers_pool()->Execute(tasks_count,
tasks);
} | TEST(CpuBackendThreadpoolTest, OneThreadSize100) {
TestGenerateArrayOfIncrementingInts(1, 100);
}
TEST(CpuBackendThreadpoolTest, ThreeThreadsSize1000000) {
TestGenerateArrayOfIncrementingInts(3, 1000000);
}
TEST(CpuBackendThreadpoolTest, TenThreadsSize1234567) {
TestGenerateArrayOfIncrementingInts(10, 1234567);
} |
#ifndef TENSORFLOW_CORE_LIB_GTL_TOP_N_H_
#define TENSORFLOW_CORE_LIB_GTL_TOP_N_H_
#include <stddef.h>
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace gtl {
// Keeps the "best" `limit` elements pushed into it, where `cmp(a, b)` means
// "a is better than b" (default: larger is better). Pushes are O(log limit)
// amortized once the container is full. Not thread-safe.
template <class T, class Cmp = std::greater<T> >
class TopN {
 public:
  // Internal representation of elements_:
  //   UNORDERED    — fewer than limit elements, no structure maintained.
  //   BOTTOM_KNOWN — the worst element is parked at elements_.front().
  //   HEAP_SORTED  — elements_ is a heap under cmp_ (worst at front).
  enum State { UNORDERED, BOTTOM_KNOWN, HEAP_SORTED };
  using UnsortedIterator = typename std::vector<T>::const_iterator;
  explicit TopN(size_t limit) : TopN(limit, Cmp()) {}
  TopN(size_t limit, const Cmp &cmp) : limit_(limit), cmp_(cmp) {}
  size_t limit() const { return limit_; }
  size_t size() const { return elements_.size(); }
  bool empty() const { return size() == 0; }
  // Hint at most limit_ + 1 slots; one extra is used transiently by push.
  void reserve(size_t n) {
    elements_.reserve(std::min(n, limit_ + 1));
  }
  // Adds v; if the container is full and v does not beat the current bottom,
  // v itself is dropped. The two-argument overloads return the displaced
  // element (or v) through *dropped.
  void push(const T &v) { push(v, nullptr); }
  void push(const T &v, T *dropped) { PushInternal(v, dropped); }
  void push(T &&v) {
    push(std::move(v), nullptr);
  }
  void push(T &&v, T *dropped) {
    PushInternal(std::move(v), dropped);
  }
  // Returns the worst element currently held (container must be non-empty).
  const T &peek_bottom();
  // Extract* return a heap-allocated vector the caller must delete; the
  // destructive variants leave this container empty.
  std::vector<T> *Extract();
  std::vector<T> *ExtractUnsorted();
  std::vector<T> *ExtractNondestructive() const;
  void ExtractNondestructive(std::vector<T> *output) const;
  std::vector<T> *ExtractUnsortedNondestructive() const;
  void ExtractUnsortedNondestructive(std::vector<T> *output) const;
  UnsortedIterator unsorted_begin() const { return elements_.begin(); }
  UnsortedIterator unsorted_end() const { return elements_.end(); }
  Cmp *comparator() { return &cmp_; }
  // Discards all elements; limit and comparator are retained.
  void Reset();
 private:
  template <typename U>
  void PushInternal(U &&v, T *dropped);
  std::vector<T> elements_;
  size_t limit_;
  Cmp cmp_;
  State state_ = UNORDERED;
};
// Core push logic. Below the limit, elements are appended (keeping the known
// bottom at the front once established); upon exceeding the limit the vector
// is heapified once and thereafter the heap root (worst element) is replaced
// whenever a better element arrives.
template <class T, class Cmp>
template <typename U>
void TopN<T, Cmp>::PushInternal(U &&v, T *dropped) {
  if (limit_ == 0) {
    // Capacity zero: every pushed element is immediately dropped.
    if (dropped) *dropped = std::forward<U>(v);
    return;
  }
  if (state_ != HEAP_SORTED) {
    elements_.push_back(std::forward<U>(v));
    if (elements_.size() == limit_ + 1) {
      // Just exceeded the limit: heapify, then evict the worst (heap root).
      std::make_heap(elements_.begin(), elements_.end(), cmp_);
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.back());
      elements_.pop_back();
      state_ = HEAP_SORTED;
    } else if (state_ == UNORDERED ||
               cmp_(elements_.back(), elements_.front())) {
      // UNORDERED: no invariant to maintain. BOTTOM_KNOWN with the new
      // element beating the current bottom: the front is still the bottom.
    } else {
      // New element is the new bottom; keep it at the front.
      using std::swap;
      swap(elements_.front(), elements_.back());
    }
  } else {
    if (cmp_(v, elements_.front())) {
      // v beats the current worst: evict the root and push v into the heap.
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.back());
      elements_.back() = std::forward<U>(v);
      std::push_heap(elements_.begin(), elements_.end(), cmp_);
    } else {
      // v does not make the cut; v itself is the dropped element.
      if (dropped) *dropped = std::forward<U>(v);
    }
  }
}
// Returns the worst (bottom) element currently held; requires non-empty.
// In the UNORDERED state this does a one-time linear scan that parks the
// bottom element at the front, making subsequent calls O(1).
template <class T, class Cmp>
const T &TopN<T, Cmp>::peek_bottom() {
  CHECK(!empty());
  if (state_ == UNORDERED) {
    // Use size_t for the candidate index: the original `int` mixed signed
    // and unsigned arithmetic and narrowed `i` on assignment.
    size_t min_candidate = 0;
    for (size_t i = 1; i < elements_.size(); ++i) {
      if (cmp_(elements_[min_candidate], elements_[i])) {
        min_candidate = i;
      }
    }
    if (min_candidate != 0) {
      using std::swap;
      swap(elements_[0], elements_[min_candidate]);
    }
    state_ = BOTTOM_KNOWN;
  }
  return elements_.front();
}
// Moves all elements into a new, fully sorted (best-first) vector, leaving
// this container empty. Caller owns the returned vector.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::Extract() {
  std::vector<T> *result = new std::vector<T>;
  result->swap(elements_);
  if (state_ == HEAP_SORTED) {
    std::sort_heap(result->begin(), result->end(), cmp_);
  } else {
    std::sort(result->begin(), result->end(), cmp_);
  }
  return result;
}
// Moves all elements, in unspecified order, into a new vector owned by the
// caller; this container is left empty.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsorted() {
  std::vector<T> *result = new std::vector<T>;
  elements_.swap(*result);
  return result;
}
// Copying variant of Extract(): returns a caller-owned sorted copy without
// modifying this container.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractNondestructive() const {
  std::vector<T> *result = new std::vector<T>;
  ExtractNondestructive(result);
  return result;
}
// Copies all elements into *output and sorts them best-first; this container
// is unchanged. `output` must be non-null.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractNondestructive(std::vector<T> *output) const {
  CHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    std::sort_heap(output->begin(), output->end(), cmp_);
  } else {
    std::sort(output->begin(), output->end(), cmp_);
  }
}
// Copying variant of ExtractUnsorted(): returns a caller-owned, unordered
// copy without modifying this container.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsortedNondestructive() const {
  std::vector<T> *result = new std::vector<T>;
  ExtractUnsortedNondestructive(result);
  return result;
}
// Copies all elements, in unspecified order, into *output (must be non-null);
// this container is unchanged.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractUnsortedNondestructive(std::vector<T> *output) const {
  CHECK(output != nullptr);
  output->assign(elements_.begin(), elements_.end());
}
// Discards all stored elements and returns to the UNORDERED state; the limit
// and comparator are kept.
template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
  elements_.clear();
  state_ = UNORDERED;
}
}
}
#endif | #include "tensorflow/core/lib/gtl/top_n.h"
#include <string>
#include <vector>
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace {
using tensorflow::string;
using tensorflow::gtl::TopN;
using tensorflow::random::PhiloxRandom;
using tensorflow::random::SimplePhilox;
// Moves the pointee out of a heap-allocated object, frees the object, and
// returns the moved value. Used with TopN's Extract* methods, which return
// raw owning pointers.
template <class T>
T ConsumeRawPtr(T *p) {
  std::unique_ptr<T> owner(p);  // Ensures deletion even if the move throws.
  return std::move(*owner);
}
// Pushes n_elements random ints (uniform in [0, limit)) into a TopN and
// cross-checks size, optionally peek_bottom(), and the extracted contents
// against a fully sorted shadow copy.
template <class Cmp>
void TestIntTopNHelper(size_t limit, size_t n_elements, const Cmp &cmp,
                       SimplePhilox *random, bool test_peek,
                       bool test_extract_unsorted) {
  LOG(INFO) << "Testing limit=" << limit << ", n_elements=" << n_elements
            << ", test_peek=" << test_peek
            << ", test_extract_unsorted=" << test_extract_unsorted;
  TopN<int, Cmp> top(limit, cmp);
  std::vector<int> shadow(n_elements);
  for (int i = 0; i != n_elements; ++i) shadow[i] = random->Uniform(limit);
  for (int e : shadow) top.push(e);
  // The best min(limit, n_elements) elements of the shadow are the expected
  // TopN contents, in order.
  std::sort(shadow.begin(), shadow.end(), cmp);
  size_t top_size = std::min(limit, n_elements);
  EXPECT_EQ(top_size, top.size());
  if (test_peek && top_size != 0) {
    EXPECT_EQ(shadow[top_size - 1], top.peek_bottom());
  }
  std::vector<int> v;
  if (test_extract_unsorted) {
    // Unsorted extraction has no ordering guarantee, so sort before compare.
    v = ConsumeRawPtr(top.ExtractUnsorted());
    std::sort(v.begin(), v.end(), cmp);
  } else {
    v = ConsumeRawPtr(top.Extract());
  }
  EXPECT_EQ(top_size, v.size());
  for (int i = 0; i != top_size; ++i) {
    VLOG(1) << "Top element " << v[i];
    EXPECT_EQ(shadow[i], v[i]);
  }
}
// Runs the helper over all four combinations of peek / extract-unsorted.
template <class Cmp>
void TestIntTopN(size_t limit, size_t n_elements, const Cmp &cmp,
                 SimplePhilox *random) {
  TestIntTopNHelper(limit, n_elements, cmp, random, true, false);
  TestIntTopNHelper(limit, n_elements, cmp, random, false, false);
  TestIntTopNHelper(limit, n_elements, cmp, random, true, true);
  TestIntTopNHelper(limit, n_elements, cmp, random, false, true);
}
// Sweeps limit/n_elements boundary combinations: zero limit, empty input,
// limit equal to / one off from n, both orderings, and an effectively
// unbounded limit (size_t(-1)).
TEST(TopNTest, Misc) {
  PhiloxRandom philox(1, 1);
  SimplePhilox random(&philox);
  TestIntTopN(0, 5, std::greater<int>(), &random);
  TestIntTopN(32, 0, std::greater<int>(), &random);
  TestIntTopN(6, 6, std::greater<int>(), &random);
  TestIntTopN(6, 6, std::less<int>(), &random);
  TestIntTopN(1000, 999, std::greater<int>(), &random);
  TestIntTopN(1000, 1000, std::greater<int>(), &random);
  TestIntTopN(1000, 1001, std::greater<int>(), &random);
  TestIntTopN(2300, 28393, std::less<int>(), &random);
  TestIntTopN(30, 100, std::greater<int>(), &random);
  TestIntTopN(100, 30, std::less<int>(), &random);
  TestIntTopN(size_t(-1), 3, std::greater<int>(), &random);
  TestIntTopN(size_t(-1), 0, std::greater<int>(), &random);
  TestIntTopN(0, 5, std::greater<int>(), &random);
}
// End-to-end behavior with strings: push/evict, peek_bottom, copy
// construction/assignment, Extract ordering, and Reset-based reuse.
TEST(TopNTest, String) {
  LOG(INFO) << "Testing strings";
  TopN<string> top(3);
  EXPECT_TRUE(top.empty());
  top.push("abracadabra");
  top.push("waldemar");
  EXPECT_EQ(2, top.size());
  EXPECT_EQ("abracadabra", top.peek_bottom());
  top.push("");
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("", top.peek_bottom());
  top.push("top");
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("abracadabra", top.peek_bottom());
  top.push("Google");
  top.push("test");
  EXPECT_EQ(3, top.size());
  EXPECT_EQ("test", top.peek_bottom());
  // Copies must be independent of the original.
  TopN<string> top2(top);
  TopN<string> top3(5);
  top3 = top;
  EXPECT_EQ("test", top3.peek_bottom());
  {
    std::vector<string> s = ConsumeRawPtr(top.Extract());
    EXPECT_EQ(s[0], "waldemar");
    EXPECT_EQ(s[1], "top");
    EXPECT_EQ(s[2], "test");
  }
  top2.push("zero");
  EXPECT_EQ(top2.peek_bottom(), "top");
  {
    std::vector<string> s = ConsumeRawPtr(top2.Extract());
    EXPECT_EQ(s[0], "zero");
    EXPECT_EQ(s[1], "waldemar");
    EXPECT_EQ(s[2], "top");
  }
  {
    std::vector<string> s = ConsumeRawPtr(top3.Extract());
    EXPECT_EQ(s[0], "waldemar");
    EXPECT_EQ(s[1], "top");
    EXPECT_EQ(s[2], "test");
  }
  // Reset() must allow full reuse with identical results.
  TopN<string> top4(3);
  for (int i = 0; i < 2; ++i) {
    top4.push("abcd");
    top4.push("ijkl");
    top4.push("efgh");
    top4.push("mnop");
    std::vector<string> s = ConsumeRawPtr(top4.Extract());
    EXPECT_EQ(s[0], "mnop");
    EXPECT_EQ(s[1], "ijkl");
    EXPECT_EQ(s[2], "efgh");
    top4.Reset();
  }
}
// Two-argument push() with owning raw pointers: every displaced pointer is
// returned through `dropped` so the caller can delete it (no leaks).
TEST(TopNTest, Ptr) {
  LOG(INFO) << "Testing 2-argument push()";
  TopN<string *> topn(3);
  for (int i = 0; i < 8; ++i) {
    string *dropped = nullptr;
    topn.push(new string(std::to_string(i)), &dropped);
    delete dropped;
  }
  for (int i = 8; i > 0; --i) {
    string *dropped = nullptr;
    topn.push(new string(std::to_string(i)), &dropped);
    delete dropped;
  }
  std::vector<string *> extract = ConsumeRawPtr(topn.Extract());
  for (auto &temp : extract) {
    delete temp;
  }
  extract.clear();
}
// Orders smart/raw pointers by comparing their pointees with operator>.
struct PointeeGreater {
  template <typename T>
  bool operator()(const T &a, const T &b) const {
    return *a > *b;
  }
};
// Move-only element type (unique_ptr): push(T&&) and Extract() must work
// without requiring copies.
TEST(TopNTest, MoveOnly) {
  using StrPtr = std::unique_ptr<string>;
  TopN<StrPtr, PointeeGreater> topn(3);
  for (int i = 0; i < 8; ++i) topn.push(StrPtr(new string(std::to_string(i))));
  for (int i = 8; i > 0; --i) topn.push(StrPtr(new string(std::to_string(i))));
  std::vector<StrPtr> extract = ConsumeRawPtr(topn.Extract());
  EXPECT_EQ(extract.size(), 3);
  EXPECT_EQ(*(extract[0]), "8");
  EXPECT_EQ(*(extract[1]), "7");
  EXPECT_EQ(*(extract[2]), "7");
}
// Nondestructive extraction leaves the container intact across repeated
// pushes; results are checked after every push.
TEST(TopNTest, Nondestructive) {
  LOG(INFO) << "Testing Nondestructive extracts";
  TopN<int> top4(4);
  for (int i = 0; i < 8; ++i) {
    top4.push(i);
    std::vector<int> v = ConsumeRawPtr(top4.ExtractNondestructive());
    EXPECT_EQ(std::min(i + 1, 4), v.size());
    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
  }
  TopN<int> top3(3);
  for (int i = 0; i < 8; ++i) {
    top3.push(i);
    std::vector<int> v = ConsumeRawPtr(top3.ExtractUnsortedNondestructive());
    std::sort(v.begin(), v.end(), std::greater<int>());
    EXPECT_EQ(std::min(i + 1, 3), v.size());
    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
  }
}
// Comparator that aborts the test if invoked; used to prove that a
// zero-limit TopN never compares elements.
struct ForbiddenCmp {
  bool operator()(int lhs, int rhs) const {
    LOG(FATAL) << "ForbiddenCmp called " << lhs << " " << rhs;
  }
};
// With limit 0 every push is dropped immediately, no comparison is ever
// made, and extraction yields an empty vector.
TEST(TopNTest, ZeroLimit) {
  TopN<int, ForbiddenCmp> top(0);
  top.push(1);
  top.push(2);
  int dropped = -1;
  top.push(1, &dropped);
  top.push(2, &dropped);
  std::vector<int> v;
  top.ExtractNondestructive(&v);
  EXPECT_EQ(0, v.size());
}
// unsorted_begin()/unsorted_end() expose exactly the retained elements, in
// unspecified order (sorted here before checking).
TEST(TopNTest, Iteration) {
  TopN<int> top(4);
  for (int i = 0; i < 8; ++i) top.push(i);
  std::vector<int> actual(top.unsorted_begin(), top.unsorted_end());
  std::sort(actual.begin(), actual.end());
  EXPECT_EQ(actual.size(), 4);
  EXPECT_EQ(actual[0], 4);
  EXPECT_EQ(actual[1], 5);
  EXPECT_EQ(actual[2], 6);
  EXPECT_EQ(actual[3], 7);
}
} | template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
elements_.clear();
state_ = UNORDERED;
} | TEST(TopNTest, String) {
LOG(INFO) << "Testing strings";
TopN<string> top(3);
EXPECT_TRUE(top.empty());
top.push("abracadabra");
top.push("waldemar");
EXPECT_EQ(2, top.size());
EXPECT_EQ("abracadabra", top.peek_bottom());
top.push("");
EXPECT_EQ(3, top.size());
EXPECT_EQ("", top.peek_bottom());
top.push("top");
EXPECT_EQ(3, top.size());
EXPECT_EQ("abracadabra", top.peek_bottom());
top.push("Google");
top.push("test");
EXPECT_EQ(3, top.size());
EXPECT_EQ("test", top.peek_bottom());
TopN<string> top2(top);
TopN<string> top3(5);
top3 = top;
EXPECT_EQ("test", top3.peek_bottom());
{
std::vector<string> s = ConsumeRawPtr(top.Extract());
EXPECT_EQ(s[0], "waldemar");
EXPECT_EQ(s[1], "top");
EXPECT_EQ(s[2], "test");
}
top2.push("zero");
EXPECT_EQ(top2.peek_bottom(), "top");
{
std::vector<string> s = ConsumeRawPtr(top2.Extract());
EXPECT_EQ(s[0], "zero");
EXPECT_EQ(s[1], "waldemar");
EXPECT_EQ(s[2], "top");
}
{
std::vector<string> s = ConsumeRawPtr(top3.Extract());
EXPECT_EQ(s[0], "waldemar");
EXPECT_EQ(s[1], "top");
EXPECT_EQ(s[2], "test");
}
TopN<string> top4(3);
for (int i = 0; i < 2; ++i) {
top4.push("abcd");
top4.push("ijkl");
top4.push("efgh");
top4.push("mnop");
std::vector<string> s = ConsumeRawPtr(top4.Extract());
EXPECT_EQ(s[0], "mnop");
EXPECT_EQ(s[1], "ijkl");
EXPECT_EQ(s[2], "efgh");
top4.Reset();
}
}
TEST(TopNTest, Nondestructive) {
LOG(INFO) << "Testing Nondestructive extracts";
TopN<int> top4(4);
for (int i = 0; i < 8; ++i) {
top4.push(i);
std::vector<int> v = ConsumeRawPtr(top4.ExtractNondestructive());
EXPECT_EQ(std::min(i + 1, 4), v.size());
for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
}
TopN<int> top3(3);
for (int i = 0; i < 8; ++i) {
top3.push(i);
std::vector<int> v = ConsumeRawPtr(top3.ExtractUnsortedNondestructive());
std::sort(v.begin(), v.end(), std::greater<int>());
EXPECT_EQ(std::min(i + 1, 3), v.size());
for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
}
} |
#include "base/operators.h"
#include <algorithm>
#include <array>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/log/absl_check.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/internal/operators.h"
namespace cel {
namespace {
using base_internal::OperatorData;
// Strict-weak ordering of OperatorData by `name`.  `is_transparent` enables
// heterogeneous lookups (std::lower_bound with an absl::string_view key)
// without constructing a temporary OperatorData.
struct OperatorDataNameComparer {
  using is_transparent = void;
  bool operator()(const OperatorData* lhs, const OperatorData* rhs) const {
    return lhs->name < rhs->name;
  }
  bool operator()(const OperatorData* lhs, absl::string_view rhs) const {
    return lhs->name < rhs;
  }
  bool operator()(absl::string_view lhs, const OperatorData* rhs) const {
    return lhs < rhs->name;
  }
};
// Strict-weak ordering of OperatorData by `display_name`.  Transparent, so
// binary searches can take an absl::string_view key directly.
struct OperatorDataDisplayNameComparer {
  using is_transparent = void;
  bool operator()(const OperatorData* lhs, const OperatorData* rhs) const {
    return lhs->display_name < rhs->display_name;
  }
  bool operator()(const OperatorData* lhs, absl::string_view rhs) const {
    return lhs->display_name < rhs;
  }
  bool operator()(absl::string_view lhs, const OperatorData* rhs) const {
    return lhs < rhs->display_name;
  }
};
#define CEL_OPERATORS_DATA(id, symbol, name, precedence, arity) \
ABSL_CONST_INIT const OperatorData id##_storage = { \
OperatorId::k##id, name, symbol, precedence, arity};
CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DATA)
#undef CEL_OPERATORS_DATA
#define CEL_OPERATORS_COUNT(id, symbol, name, precedence, arity) +1
using OperatorsArray =
std::array<const OperatorData*,
0 + CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>;
using UnaryOperatorsArray =
std::array<const OperatorData*,
0 + CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>;
using BinaryOperatorsArray =
std::array<const OperatorData*,
0 + CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>;
using TernaryOperatorsArray =
std::array<const OperatorData*,
0 + CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_COUNT)>;
#undef CEL_OPERATORS_COUNT
ABSL_CONST_INIT absl::once_flag operators_once_flag;
#define CEL_OPERATORS_DO(id, symbol, name, precedence, arity) &id##_storage,
OperatorsArray operators_by_name = {
CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DO)};
OperatorsArray operators_by_display_name = {
CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATORS_DO)};
UnaryOperatorsArray unary_operators_by_name = {
CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
UnaryOperatorsArray unary_operators_by_display_name = {
CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
BinaryOperatorsArray binary_operators_by_name = {
CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
BinaryOperatorsArray binary_operators_by_display_name = {
CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
TernaryOperatorsArray ternary_operators_by_name = {
CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
TernaryOperatorsArray ternary_operators_by_display_name = {
CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_OPERATORS_DO)};
#undef CEL_OPERATORS_DO
void InitializeOperators() {
std::stable_sort(operators_by_name.begin(), operators_by_name.end(),
OperatorDataNameComparer{});
std::stable_sort(operators_by_display_name.begin(),
operators_by_display_name.end(),
OperatorDataDisplayNameComparer{});
std::stable_sort(unary_operators_by_name.begin(),
unary_operators_by_name.end(), OperatorDataNameComparer{});
std::stable_sort(unary_operators_by_display_name.begin(),
unary_operators_by_display_name.end(),
OperatorDataDisplayNameComparer{});
std::stable_sort(binary_operators_by_name.begin(),
binary_operators_by_name.end(), OperatorDataNameComparer{});
std::stable_sort(binary_operators_by_display_name.begin(),
binary_operators_by_display_name.end(),
OperatorDataDisplayNameComparer{});
std::stable_sort(ternary_operators_by_name.begin(),
ternary_operators_by_name.end(), OperatorDataNameComparer{});
std::stable_sort(ternary_operators_by_display_name.begin(),
ternary_operators_by_display_name.end(),
OperatorDataDisplayNameComparer{});
}
}
// Checked narrowing conversion: `op` must actually be unary (CHECK-fails
// otherwise); shares the same underlying OperatorData.
UnaryOperator::UnaryOperator(Operator op) : data_(op.data_) {
  ABSL_CHECK(op.arity() == Arity::kUnary);
}
// Checked narrowing conversion: `op` must actually be binary (CHECK-fails
// otherwise); shares the same underlying OperatorData.
BinaryOperator::BinaryOperator(Operator op) : data_(op.data_) {
  ABSL_CHECK(op.arity() == Arity::kBinary);
}
// Checked narrowing conversion: `op` must actually be ternary (CHECK-fails
// otherwise); shares the same underlying OperatorData.
TernaryOperator::TernaryOperator(Operator op) : data_(op.data_) {
  ABSL_CHECK(op.arity() == Arity::kTernary);
}
#define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \
UnaryOperator Operator::id() { return UnaryOperator(&id##_storage); }
CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR)
#undef CEL_UNARY_OPERATOR
#define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \
BinaryOperator Operator::id() { return BinaryOperator(&id##_storage); }
CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR)
#undef CEL_BINARY_OPERATOR
#define CEL_TERNARY_OPERATOR(id, symbol, name, precedence, arity) \
TernaryOperator Operator::id() { return TernaryOperator(&id##_storage); }
CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_TERNARY_OPERATOR)
#undef CEL_TERNARY_OPERATOR
// Looks up any operator by its internal parse name (e.g. "_+_", "@in").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<Operator> Operator::FindByName(absl::string_view input) {
  // The lookup tables are sorted lazily, exactly once.
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos =
        std::lower_bound(operators_by_name.cbegin(), operators_by_name.cend(),
                         input, OperatorDataNameComparer{});
    if (pos != operators_by_name.cend() && (*pos)->name == input) {
      return Operator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up any operator by its display (source) name, e.g. "-" or "in".
// Returns absl::nullopt for an empty or unknown name.
absl::optional<Operator> Operator::FindByDisplayName(absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (input.empty()) {
    return absl::nullopt;
  }
  auto it = std::lower_bound(operators_by_display_name.cbegin(),
                             operators_by_display_name.cend(), input,
                             OperatorDataDisplayNameComparer{});
  // Fix: the not-found check previously compared `it` against
  // operators_by_name.cend() — the end iterator of a *different* array —
  // which both checks the wrong range and is undefined behavior.  It must
  // use the end of the array that was searched.
  if (it == operators_by_display_name.cend() || (*it)->display_name != input) {
    return absl::nullopt;
  }
  return Operator(*it);
}
// Looks up a unary operator by its internal parse name (e.g. "-_", "!_").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<UnaryOperator> UnaryOperator::FindByName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos = std::lower_bound(unary_operators_by_name.cbegin(),
                                      unary_operators_by_name.cend(), input,
                                      OperatorDataNameComparer{});
    if (pos != unary_operators_by_name.cend() && (*pos)->name == input) {
      return UnaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up a unary operator by its display name (e.g. "-", "!").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<UnaryOperator> UnaryOperator::FindByDisplayName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos = std::lower_bound(unary_operators_by_display_name.cbegin(),
                                      unary_operators_by_display_name.cend(),
                                      input, OperatorDataDisplayNameComparer{});
    if (pos != unary_operators_by_display_name.cend() &&
        (*pos)->display_name == input) {
      return UnaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up a binary operator by its internal parse name (e.g. "_-_").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<BinaryOperator> BinaryOperator::FindByName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos = std::lower_bound(binary_operators_by_name.cbegin(),
                                      binary_operators_by_name.cend(), input,
                                      OperatorDataNameComparer{});
    if (pos != binary_operators_by_name.cend() && (*pos)->name == input) {
      return BinaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up a binary operator by its display name (e.g. "-", "&&").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<BinaryOperator> BinaryOperator::FindByDisplayName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos = std::lower_bound(binary_operators_by_display_name.cbegin(),
                                      binary_operators_by_display_name.cend(),
                                      input, OperatorDataDisplayNameComparer{});
    if (pos != binary_operators_by_display_name.cend() &&
        (*pos)->display_name == input) {
      return BinaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up a ternary operator by its internal parse name (e.g. "_?_:_").
// Returns absl::nullopt for an empty or unknown name.
absl::optional<TernaryOperator> TernaryOperator::FindByName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos = std::lower_bound(ternary_operators_by_name.cbegin(),
                                      ternary_operators_by_name.cend(), input,
                                      OperatorDataNameComparer{});
    if (pos != ternary_operators_by_name.cend() && (*pos)->name == input) {
      return TernaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
// Looks up a ternary operator by its display name.  Returns absl::nullopt
// for an empty or unknown name.
absl::optional<TernaryOperator> TernaryOperator::FindByDisplayName(
    absl::string_view input) {
  absl::call_once(operators_once_flag, InitializeOperators);
  if (!input.empty()) {
    const auto pos =
        std::lower_bound(ternary_operators_by_display_name.cbegin(),
                         ternary_operators_by_display_name.cend(), input,
                         OperatorDataDisplayNameComparer{});
    if (pos != ternary_operators_by_display_name.cend() &&
        (*pos)->display_name == input) {
      return TernaryOperator(*pos);
    }
  }
  return absl::nullopt;
}
} | #include "base/operators.h"
#include <type_traits>
#include "absl/hash/hash_testing.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/internal/operators.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::Eq;
using testing::Optional;
// Verifies every accessor of a typed operator wrapper `op` against the
// expected id/name/display_name/precedence/arity, both directly and after a
// round-trip through the generic Operator type.
template <typename Op, typename OpId>
void TestOperator(Op op, OpId id, absl::string_view name,
                  absl::string_view display_name, int precedence, Arity arity) {
  EXPECT_EQ(op.id(), id);
  EXPECT_EQ(Operator(op).id(), static_cast<OperatorId>(id));
  EXPECT_EQ(op.name(), name);
  EXPECT_EQ(op.display_name(), display_name);
  EXPECT_EQ(op.precedence(), precedence);
  EXPECT_EQ(op.arity(), arity);
  EXPECT_EQ(Operator(op).arity(), arity);
  // Converting back from the generic Operator must compare equal.
  EXPECT_EQ(Op(Operator(op)), op);
}
// Arity-specific convenience wrapper around TestOperator (unary).
void TestUnaryOperator(UnaryOperator op, UnaryOperatorId id,
                       absl::string_view name, absl::string_view display_name,
                       int precedence) {
  TestOperator(op, id, name, display_name, precedence, Arity::kUnary);
}
// Arity-specific convenience wrapper around TestOperator (binary).
void TestBinaryOperator(BinaryOperator op, BinaryOperatorId id,
                        absl::string_view name, absl::string_view display_name,
                        int precedence) {
  TestOperator(op, id, name, display_name, precedence, Arity::kBinary);
}
// Arity-specific convenience wrapper around TestOperator (ternary).
void TestTernaryOperator(TernaryOperator op, TernaryOperatorId id,
                         absl::string_view name, absl::string_view display_name,
                         int precedence) {
  TestOperator(op, id, name, display_name, precedence, Arity::kTernary);
}
TEST(Operator, TypeTraits) {
EXPECT_FALSE(std::is_default_constructible_v<Operator>);
EXPECT_TRUE(std::is_copy_constructible_v<Operator>);
EXPECT_TRUE(std::is_move_constructible_v<Operator>);
EXPECT_TRUE(std::is_copy_assignable_v<Operator>);
EXPECT_TRUE(std::is_move_assignable_v<Operator>);
EXPECT_FALSE((std::is_convertible_v<Operator, UnaryOperator>));
EXPECT_FALSE((std::is_convertible_v<Operator, BinaryOperator>));
EXPECT_FALSE((std::is_convertible_v<Operator, TernaryOperator>));
}
TEST(UnaryOperator, TypeTraits) {
EXPECT_FALSE(std::is_default_constructible_v<UnaryOperator>);
EXPECT_TRUE(std::is_copy_constructible_v<UnaryOperator>);
EXPECT_TRUE(std::is_move_constructible_v<UnaryOperator>);
EXPECT_TRUE(std::is_copy_assignable_v<UnaryOperator>);
EXPECT_TRUE(std::is_move_assignable_v<UnaryOperator>);
EXPECT_TRUE((std::is_convertible_v<UnaryOperator, Operator>));
}
TEST(BinaryOperator, TypeTraits) {
EXPECT_FALSE(std::is_default_constructible_v<BinaryOperator>);
EXPECT_TRUE(std::is_copy_constructible_v<BinaryOperator>);
EXPECT_TRUE(std::is_move_constructible_v<BinaryOperator>);
EXPECT_TRUE(std::is_copy_assignable_v<BinaryOperator>);
EXPECT_TRUE(std::is_move_assignable_v<BinaryOperator>);
EXPECT_TRUE((std::is_convertible_v<BinaryOperator, Operator>));
}
TEST(TernaryOperator, TypeTraits) {
EXPECT_FALSE(std::is_default_constructible_v<TernaryOperator>);
EXPECT_TRUE(std::is_copy_constructible_v<TernaryOperator>);
EXPECT_TRUE(std::is_move_constructible_v<TernaryOperator>);
EXPECT_TRUE(std::is_copy_assignable_v<TernaryOperator>);
EXPECT_TRUE(std::is_move_assignable_v<TernaryOperator>);
EXPECT_TRUE((std::is_convertible_v<TernaryOperator, Operator>));
}
#define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \
TEST(UnaryOperator, id) { \
TestUnaryOperator(UnaryOperator::id(), UnaryOperatorId::k##id, name, \
symbol, precedence); \
}
CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR)
#undef CEL_UNARY_OPERATOR
#define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \
TEST(BinaryOperator, id) { \
TestBinaryOperator(BinaryOperator::id(), BinaryOperatorId::k##id, name, \
symbol, precedence); \
}
CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR)
#undef CEL_BINARY_OPERATOR
#define CEL_TERNARY_OPERATOR(id, symbol, name, precedence, arity) \
TEST(TernaryOperator, id) { \
TestTernaryOperator(TernaryOperator::id(), TernaryOperatorId::k##id, name, \
symbol, precedence); \
}
CEL_INTERNAL_TERNARY_OPERATORS_ENUM(CEL_TERNARY_OPERATOR)
#undef CEL_TERNARY_OPERATOR
TEST(Operator, FindByName) {
EXPECT_THAT(Operator::FindByName("@in"), Optional(Eq(Operator::In())));
EXPECT_THAT(Operator::FindByName("_in_"), Optional(Eq(Operator::OldIn())));
EXPECT_THAT(Operator::FindByName("in"), Eq(absl::nullopt));
EXPECT_THAT(Operator::FindByName(""), Eq(absl::nullopt));
}
TEST(Operator, FindByDisplayName) {
EXPECT_THAT(Operator::FindByDisplayName("-"),
Optional(Eq(Operator::Subtract())));
EXPECT_THAT(Operator::FindByDisplayName("@in"), Eq(absl::nullopt));
EXPECT_THAT(Operator::FindByDisplayName(""), Eq(absl::nullopt));
}
TEST(UnaryOperator, FindByName) {
EXPECT_THAT(UnaryOperator::FindByName("-_"),
Optional(Eq(Operator::Negate())));
EXPECT_THAT(UnaryOperator::FindByName("_-_"), Eq(absl::nullopt));
EXPECT_THAT(UnaryOperator::FindByName(""), Eq(absl::nullopt));
}
TEST(UnaryOperator, FindByDisplayName) {
EXPECT_THAT(UnaryOperator::FindByDisplayName("-"),
Optional(Eq(Operator::Negate())));
EXPECT_THAT(UnaryOperator::FindByDisplayName("&&"), Eq(absl::nullopt));
EXPECT_THAT(UnaryOperator::FindByDisplayName(""), Eq(absl::nullopt));
}
TEST(BinaryOperator, FindByName) {
EXPECT_THAT(BinaryOperator::FindByName("_-_"),
Optional(Eq(Operator::Subtract())));
EXPECT_THAT(BinaryOperator::FindByName("-_"), Eq(absl::nullopt));
EXPECT_THAT(BinaryOperator::FindByName(""), Eq(absl::nullopt));
}
TEST(BinaryOperator, FindByDisplayName) {
EXPECT_THAT(BinaryOperator::FindByDisplayName("-"),
Optional(Eq(Operator::Subtract())));
EXPECT_THAT(BinaryOperator::FindByDisplayName("!"), Eq(absl::nullopt));
EXPECT_THAT(BinaryOperator::FindByDisplayName(""), Eq(absl::nullopt));
}
TEST(TernaryOperator, FindByName) {
EXPECT_THAT(TernaryOperator::FindByName("_?_:_"),
Optional(Eq(TernaryOperator::Conditional())));
EXPECT_THAT(TernaryOperator::FindByName("-_"), Eq(absl::nullopt));
EXPECT_THAT(TernaryOperator::FindByName(""), Eq(absl::nullopt));
}
TEST(TernaryOperator, FindByDisplayName) {
EXPECT_THAT(TernaryOperator::FindByDisplayName(""), Eq(absl::nullopt));
EXPECT_THAT(TernaryOperator::FindByDisplayName("!"), Eq(absl::nullopt));
}
TEST(Operator, SupportsAbslHash) {
#define CEL_OPERATOR(id, symbol, name, precedence, arity) \
Operator(Operator::id()),
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{CEL_INTERNAL_OPERATORS_ENUM(CEL_OPERATOR)}));
#undef CEL_OPERATOR
}
TEST(UnaryOperator, SupportsAbslHash) {
#define CEL_UNARY_OPERATOR(id, symbol, name, precedence, arity) \
UnaryOperator::id(),
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{CEL_INTERNAL_UNARY_OPERATORS_ENUM(CEL_UNARY_OPERATOR)}));
#undef CEL_UNARY_OPERATOR
}
TEST(BinaryOperator, SupportsAbslHash) {
#define CEL_BINARY_OPERATOR(id, symbol, name, precedence, arity) \
BinaryOperator::id(),
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{CEL_INTERNAL_BINARY_OPERATORS_ENUM(CEL_BINARY_OPERATOR)}));
#undef CEL_BINARY_OPERATOR
}
}
} | absl::optional<BinaryOperator> BinaryOperator::FindByDisplayName(
absl::string_view input) {
absl::call_once(operators_once_flag, InitializeOperators);
if (input.empty()) {
return absl::nullopt;
}
auto it = std::lower_bound(binary_operators_by_display_name.cbegin(),
binary_operators_by_display_name.cend(), input,
OperatorDataDisplayNameComparer{});
if (it == binary_operators_by_display_name.cend() ||
(*it)->display_name != input) {
return absl::nullopt;
}
return BinaryOperator(*it);
} | TEST(BinaryOperator, FindByDisplayName) {
EXPECT_THAT(BinaryOperator::FindByDisplayName("-"),
Optional(Eq(Operator::Subtract())));
EXPECT_THAT(BinaryOperator::FindByDisplayName("!"), Eq(absl::nullopt));
EXPECT_THAT(BinaryOperator::FindByDisplayName(""), Eq(absl::nullopt));
} |
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShardDatasetOp::kDatasetType;
constexpr const char* const ShardDatasetOp::kInputDataset;
constexpr const char* const ShardDatasetOp::kNumShards;
constexpr const char* const ShardDatasetOp::kIndex;
constexpr const char* const ShardDatasetOp::kRequireNonEmpty;
constexpr const char* const ShardDatasetOp::kOutputTypes;
constexpr const char* const ShardDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kNextIndex[] = "next_index";
constexpr char kFileShardErrorMessage[] =
"If you are using datasets with distribution strategy, consider setting "
"the auto sharding policy to either DATA or OFF using the "
"`experimental_distribute.auto_shard_policy` option of `tf.data.Options()`."
" Or, split your input files into a larger number of small files such that "
"number of files is greater than number of shards/workers.";
// Dataset that yields every `num_shards`-th element of `input`, starting at
// offset `index` — i.e. the elements whose input position is congruent to
// `index` modulo `num_shards`.
class ShardDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, int64_t num_shards, int64_t index,
          bool require_non_empty, const DatasetBase* input)
      : DatasetBase(DatasetContext(ctx)),
        num_shards_(num_shards),
        index_(index),
        input_(input),
        require_non_empty_(require_non_empty),
        traceme_metadata_(
            {{"index", strings::Printf("%lld", static_cast<long long>(index))},
             {"num_shards",
              strings::Printf("%lld", static_cast<long long>(num_shards))}}) {
    // Keep the upstream dataset alive for the lifetime of this dataset.
    input_->Ref();
    random_indexing_compatible_ = absl::OkStatus();
    if (input_ != nullptr) {
      // Random access (global shuffling) is only supported if the input
      // supports it.
      random_indexing_compatible_ = input_->RandomIndexingCompatible();
    }
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  // Sharding never changes element types or shapes; delegate to the input.
  const DataTypeVector& output_dtypes() const override {
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return input_->output_shapes();
  }
  string DebugString() const override {
    name_utils::DatasetDebugStringParams params;
    params.set_args(num_shards_, index_);
    return name_utils::DatasetDebugString(kDatasetType, params);
  }
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    int64_t n = input_->Cardinality(options);
    if (n == kInfiniteCardinality || n == kUnknownCardinality) {
      return n;
    }
    // Every shard gets floor(n / num_shards) elements; the first
    // (n % num_shards) shards get one extra.
    return n / num_shards_ + (index_ < n % num_shards_ ? 1 : 0);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
  // Random-access lookup: element i of this shard is input element
  // index_ + num_shards_ * i.
  Status Get(OpKernelContext* ctx, int64 index,
             std::vector<Tensor>* out_tensors) const override {
    TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
    return input_->Get(ctx, index_ + (num_shards_ * index), out_tensors);
  }
  absl::Status RandomIndexingCompatible() const override {
    return random_indexing_compatible_;
  }
 protected:
  // Serializes this dataset as a ShardDataset node with the input dataset
  // and the num_shards/index scalars as inputs.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* num_shards = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(num_shards_, &num_shards));
    Node* index = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(index_, &index));
    AttrValue require_non_empty_attr;
    b->BuildAttrValue(require_non_empty_, &require_non_empty_attr);
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_graph_node, num_shards, index},
                      {{kRequireNonEmpty, require_non_empty_attr}}, output));
    return absl::OkStatus();
  }
 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params), next_index_(0), element_count_(0) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status Initialize(IteratorContext* ctx) override {
      // SHARD_HINT is a placeholder that must be rewritten (by auto-sharding
      // or the tf.data service) before an iterator can be created.
      if (dataset()->num_shards_ == kShardHint) {
        return errors::FailedPrecondition(
            "`tf.data.Dataset.shard(SHARD_HINT, ...)` can only be used in "
            "`tf.distribute.Strategy.experimental_distribute_dataset()` with "
            "`tf.data.experimental.AutoShardPolicy.HINT` policy, or tf.data "
            "service with "
            "`tf.data.experimental.service.ShardingPolicy.HINT` processing "
            "mode.");
      }
      return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      *end_of_sequence = false;
      // input_impl_ is reset once the input is exhausted.
      if (!input_impl_) {
        *end_of_sequence = true;
        return absl::OkStatus();
      }
      // Global-shuffle path: positions are remapped by the index mapper.
      if (ctx->index_mapper() != nullptr) {
        return Get(ctx, out_tensors, end_of_sequence);
      }
      // Skip input elements until the next one belonging to this shard.
      int num_to_skip =
          (dataset()->index_ - next_index_) % dataset()->num_shards_;
      if (num_to_skip < 0) {
        num_to_skip += dataset()->num_shards_;
      }
      int num_skipped;
      TF_RETURN_IF_ERROR(
          input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
      next_index_ += num_skipped;
      if (*end_of_sequence) {
        input_impl_.reset();
        return absl::OkStatus();
      }
      std::vector<Tensor> result;
      TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
      if (*end_of_sequence) {
        input_impl_.reset();
        return absl::OkStatus();
      }
      next_index_++;
      // FILE-based sharding sanity check, run once after the first element:
      // the input must have at least num_shards elements (files), otherwise
      // some worker would receive an empty shard.
      if (dataset()->require_non_empty_ &&
          next_index_ < dataset()->num_shards_) {
        int num_skipped;
        Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
                                     end_of_sequence, &num_skipped);
        if (*end_of_sequence || errors::IsOutOfRange(s)) {
          return absl::InvalidArgumentError(absl::StrCat(
              "Could not apply FILE based sharding: the dataset only has ",
              next_index_, " file(s), which is not enough for the required ",
              dataset()->num_shards_, " shards/workers. ",
              kFileShardErrorMessage));
        } else if (!s.ok()) {
          return s;
        }
        next_index_ = dataset()->num_shards_;
      }
      *out_tensors = std::move(result);
      return absl::OkStatus();
    }
    // GetNext path used under global shuffling: delegates to the input with
    // this iterator's index mapper installed on the context.
    Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
               bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
      auto merge_checkpoint = gtl::MakeCleanup([&ctx_with_index_mapper] {
        ctx_with_index_mapper.MergeCheckpoint();
      });
      TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
                                              out_tensors, end_of_sequence));
      // Mirror the FILE-sharding check: an immediately-empty shard is an
      // error when require_non_empty_ is set.
      if (*end_of_sequence && dataset()->require_non_empty_ &&
          element_count_ == 0) {
        return absl::InvalidArgumentError(absl::StrCat(
            "Could not apply FILE based sharding: The dataset does not have "
            "enough file(s) for the required ",
            dataset()->num_shards_, " shards/workers. ",
            kFileShardErrorMessage));
      }
      ++element_count_;
      return absl::OkStatus();
    }
    // Maps a shard-local element position to an input position:
    // input_pos = shard_pos * num_shards + shard_index.
    IndexMapperFn GetIndexMapper(
        IndexMapperFn parent_index_mapper) const override {
      int64_t num_shards = dataset()->num_shards_;
      int64_t shard_index = dataset()->index_;
      return [parent_index_mapper, num_shards,
              shard_index](size_t element_position) -> absl::StatusOr<size_t> {
        TF_ASSIGN_OR_RETURN(size_t output_index,
                            parent_index_mapper(element_position));
        return output_index * num_shards + shard_index;
      };
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      // One output element per num_shards_ input elements.
      return model::MakeKnownRatioNode(
          std::move(args), 1.0 / static_cast<double>(dataset()->num_shards_));
    }
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      // Record whether the input was exhausted; state below is only written
      // for a live input iterator.
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(prefix(), kNextIndex, next_index_));
      }
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      // Global-shuffle restore path: only the element count is tracked.
      if (ctx->restored_element_count().has_value()) {
        element_count_ = *ctx->restored_element_count();
        return RestoreInput(ctx, reader, input_impl_);
      }
      int64_t input_empty;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
      if (!static_cast<bool>(input_empty)) {
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
        TF_RETURN_IF_ERROR(
            reader->ReadScalar(prefix(), kNextIndex, &next_index_));
      } else {
        input_impl_.reset();
      }
      return absl::OkStatus();
    }
    TraceMeMetadata GetTraceMeMetadata() const override {
      return dataset()->traceme_metadata_;
    }
   private:
    mutex mu_;
    // Upstream iterator; reset to null once end of sequence is reached.
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    // Position in the *input* stream of the next element to be read.
    int64_t next_index_ TF_GUARDED_BY(mu_);
    // Number of elements produced so far (used by the global-shuffle path).
    size_t element_count_ TF_GUARDED_BY(mu_);
  };
  const int64_t num_shards_;
  const int64_t index_;
  const DatasetBase* const input_;
  const bool require_non_empty_;
  const TraceMeMetadata traceme_metadata_;
  absl::Status random_indexing_compatible_;
};
// Reads the `require_non_empty` attribute once at kernel construction time.
ShardDatasetOp::ShardDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kRequireNonEmpty, &require_non_empty_));
}
// Parses and validates the `num_shards` and `index` scalar inputs, then
// wraps `input` in a sharding Dataset.
void ShardDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                 DatasetBase** output) {
  int64_t index = 0;
  int64_t num_shards = 0;
  OP_REQUIRES_OK(ctx,
                 ParseScalarArgument<int64_t>(ctx, kNumShards, &num_shards));
  // kShardHint is accepted here as a sentinel; Iterator::Initialize rejects
  // it if it was never rewritten by an auto-sharding policy.
  OP_REQUIRES(
      ctx, num_shards > 0 || num_shards == kShardHint,
      errors::InvalidArgument("Number of shards must be greater than zero "
                              "(currently num_shards = ",
                              num_shards, ")."));
  OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kIndex, &index));
  OP_REQUIRES(
      ctx, (index >= 0 && index < num_shards) || num_shards == kShardHint,
      errors::InvalidArgument("Index must be between 0 and ", num_shards - 1,
                              " (currently index = ", index, ")."));
  *output = new Dataset(ctx, num_shards, index, require_non_empty_, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ShardDataset").Device(DEVICE_CPU),
ShardDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "shard_dataset";
// Test-parameter bundle for ShardDataset: an input dataset plus the
// num_shards / index inputs and the require_non_empty attribute.
class ShardDatasetParams : public DatasetParams {
 public:
  template <typename T>
  ShardDatasetParams(T input_dataset_params, int64_t num_shards, int64_t index,
                     bool require_non_empty, DataTypeVector output_dtypes,
                     std::vector<PartialTensorShape> output_shapes,
                     string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        num_shards_(num_shards),
        index_(index),
        require_non_empty_(require_non_empty) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // num_shards and index are fed to the op as scalar int64 tensors.
  std::vector<Tensor> GetInputTensors() const override {
    return CreateTensors<int64_t>(TensorShape({}), {{num_shards_}, {index_}});
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->emplace_back(ShardDatasetOp::kInputDataset);
    input_names->emplace_back(ShardDatasetOp::kNumShards);
    input_names->emplace_back(ShardDatasetOp::kIndex);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    attr_vector->clear();
    attr_vector->emplace_back("require_non_empty", require_non_empty_);
    attr_vector->emplace_back("output_types", output_dtypes_);
    attr_vector->emplace_back("output_shapes", output_shapes_);
    attr_vector->emplace_back("metadata", "");
    return absl::OkStatus();
  }
  string dataset_type() const override { return ShardDatasetOp::kDatasetType; }
 private:
  int64_t num_shards_;
  int64_t index_;
  bool require_non_empty_;
};
class ShardDatasetOpTest : public DatasetOpsTestBase {};
ShardDatasetParams ShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
0,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 1, 1),
5,
2,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
7,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams5() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
4,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams6() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
4,
3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams ShardDatasetParams7() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParamsWithNoElemForEachShard() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
20,
5,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams1() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
7,
false,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams2() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
5,
-3,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams3() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
-3,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
ShardDatasetParams InvalidShardDatasetParams4() {
return ShardDatasetParams(RangeDatasetParams(0, 10, 1),
0,
1,
true,
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<ShardDatasetParams>> GetNextTestCases() {
return {
{ShardDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
{ShardDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
{ShardDatasetParams3(),
{}},
{ShardDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{5}})},
{ShardDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
{ShardDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
{ShardDatasetParams7(),
CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
GetNextTestCases())
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ShardDatasetOpTest, DatasetTypeString) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ShardDatasetOp::kDatasetType)));
}
TEST_F(ShardDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShardDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
// Sharding must preserve the input dataset's output shapes (scalars here).
TEST_F(ShardDatasetOpTest, DatasetOutputShapes) {
  auto params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
// Maps each parameter set to the element count its shard is expected to
// report as the dataset cardinality.
std::vector<CardinalityTestCase<ShardDatasetParams>> CardinalityTestCases() {
  return {{ShardDatasetParams1(),
           /*expected_cardinality=*/2},
          {ShardDatasetParams2(),
           /*expected_cardinality=*/2},
          {ShardDatasetParams3(),
           /*expected_cardinality=*/0},
          {ShardDatasetParams4(),
           /*expected_cardinality=*/1},
          {ShardDatasetParams5(),
           /*expected_cardinality=*/2},
          {ShardDatasetParams6(),
           /*expected_cardinality=*/2},
          {ShardDatasetParams7(),
           /*expected_cardinality=*/1}};
}
// Instantiates the parameterized cardinality test for all cases above.
DATASET_CARDINALITY_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
                           CardinalityTestCases())
// The iterator must report the same output dtypes as the dataset.
TEST_F(ShardDatasetOpTest, IteratorOutputDtypes) {
  auto params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
// The iterator must report the same output shapes as the dataset.
TEST_F(ShardDatasetOpTest, IteratorOutputShapes) {
  auto params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
// The iterator prefix should follow the standard naming convention for
// this dataset type.
TEST_F(ShardDatasetOpTest, IteratorPrefix) {
  auto params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  const auto expected_prefix = name_utils::IteratorPrefix(
      ShardDatasetOp::kDatasetType, params.iterator_prefix());
  TF_ASSERT_OK(CheckIteratorPrefix(expected_prefix));
}
// Save/restore cases: the middle initializer lists the breakpoints
// (presumably element counts at which iterator state is serialized and
// restored — matches the dataset test-harness convention; confirm against
// ITERATOR_SAVE_AND_RESTORE_TEST_P). The outputs must match an
// uninterrupted run.
std::vector<IteratorSaveAndRestoreTestCase<ShardDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {
      {ShardDatasetParams1(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{2}, {7}})},
      {ShardDatasetParams2(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{0}, {5}})},
      {ShardDatasetParams3(),
       {0, 1},
       // No elements expected for this parameter set.
       {}},
      {ShardDatasetParams4(),
       {0, 5},
       CreateTensors<int64_t>(TensorShape{}, {{5}})},
      {ShardDatasetParams5(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{4}, {9}})},
      {ShardDatasetParams6(),
       {0, 1, 5},
       CreateTensors<int64_t>(TensorShape{}, {{3}, {7}})},
      {ShardDatasetParams7(),
       {0, 5},
       CreateTensors<int64_t>(TensorShape{}, {{5}})}};
}
// Instantiates the parameterized save/restore test for all cases above.
ITERATOR_SAVE_AND_RESTORE_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// When there are not enough elements to give each shard at least one,
// GetNext must fail with InvalidArgument (require_non_empty semantics).
TEST_F(ShardDatasetOpTest, NoElemForEachShard) {
  auto params = InvalidShardDatasetParamsWithNoElemForEachShard();
  TF_ASSERT_OK(Initialize(params));
  std::vector<Tensor> outputs;
  bool end_of_sequence = false;
  const auto status =
      iterator_->GetNext(iterator_ctx_.get(), &outputs, &end_of_sequence);
  EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
}
// Every invalid parameter combination (bad index or bad shard count) must
// be rejected at dataset construction time with InvalidArgument.
TEST_F(ShardDatasetOpTest, InvalidArguments) {
  std::vector<ShardDatasetParams> bad_params;
  bad_params.push_back(InvalidShardDatasetParams1());
  bad_params.push_back(InvalidShardDatasetParams2());
  bad_params.push_back(InvalidShardDatasetParams3());
  bad_params.push_back(InvalidShardDatasetParams4());
  for (const auto& params : bad_params) {
    EXPECT_EQ(Initialize(params).code(), absl::StatusCode::kInvalidArgument);
  }
}
}
}
} | Status GetNextInternal(IteratorContext* ctx,
                       // Produces the next element belonging to this shard by
                       // skipping input elements that belong to other shards.
                       std::vector<Tensor>* out_tensors,
                       bool* end_of_sequence) override {
  mutex_lock l(mu_);  // Guards input_impl_ and next_index_.
  *end_of_sequence = false;
  if (!input_impl_) {
    // The input iterator was exhausted by an earlier call.
    *end_of_sequence = true;
    return absl::OkStatus();
  }
  if (ctx->index_mapper() != nullptr) {
    // Random-access path: delegate to the index-mapper-based getter.
    return Get(ctx, out_tensors, end_of_sequence);
  }
  // Number of input elements between the current position and the next
  // element owned by this shard (index_ modulo num_shards_).
  int num_to_skip =
      (dataset()->index_ - next_index_) % dataset()->num_shards_;
  if (num_to_skip < 0) {
    // C++ % can be negative; normalize into [0, num_shards_).
    num_to_skip += dataset()->num_shards_;
  }
  int num_skipped;
  TF_RETURN_IF_ERROR(
      input_impl_->Skip(ctx, num_to_skip, end_of_sequence, &num_skipped));
  next_index_ += num_skipped;
  if (*end_of_sequence) {
    input_impl_.reset();
    return absl::OkStatus();
  }
  std::vector<Tensor> result;
  TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &result, end_of_sequence));
  if (*end_of_sequence) {
    input_impl_.reset();
    return absl::OkStatus();
  }
  next_index_++;
  if (dataset()->require_non_empty_ &&
      next_index_ < dataset()->num_shards_) {
    // Verify that the input has at least num_shards_ elements so that every
    // shard receives at least one; otherwise FILE-based sharding is invalid.
    int num_skipped;
    Status s = input_impl_->Skip(ctx, dataset()->num_shards_ - next_index_,
                                 end_of_sequence, &num_skipped);
    if (*end_of_sequence || errors::IsOutOfRange(s)) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Could not apply FILE based sharding: the dataset only has ",
          next_index_, " file(s), which is not enough for the required ",
          dataset()->num_shards_, " shards/workers. ",
          kFileShardErrorMessage));
    } else if (!s.ok()) {
      return s;
    }
    next_index_ = dataset()->num_shards_;
  }
  *out_tensors = std::move(result);
  return absl::OkStatus();
}
} | ITERATOR_GET_NEXT_TEST_P(ShardDatasetOpTest, ShardDatasetParams,
GetNextTestCases())
// Verifies that the created dataset node carries the requested node name.
TEST_F(ShardDatasetOpTest, DatasetNodeName) {
  auto dataset_params = ShardDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
#include "xla/service/while_loop_fusible_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsPurelyExpanding(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kBroadcast ||
(instr->opcode() == HloOpcode::kConstant &&
instr->shape().rank() == 0) ||
instr->opcode() == HloOpcode::kIota;
}
bool IsFusionCandidate(const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kRng &&
(instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose);
}
}
// Returns true if the operand graph rooted at `while_operand` consists only
// of fusible elementwise/reshape/transpose ops whose leaves are "purely
// expanding" (broadcast/iota/scalar constant), i.e. the whole value can be
// cheaply recomputed inside the loop body.
bool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) {
  // Iterative DFS over the operand graph.
  absl::InlinedVector<HloInstruction*, 8> worklist;
  absl::flat_hash_set<int> visited;
  worklist.push_back(while_operand);
  while (!worklist.empty()) {
    HloInstruction* to_process = worklist.back();
    worklist.pop_back();
    if (!to_process->IsFusible()) {
      return false;
    }
    if (!visited.insert(to_process->unique_id()).second) {
      // Already seen this node. Bound compile time: refuse graphs with more
      // than 100 distinct visited instructions.
      if (visited.size() > 100) {
        return false;
      }
      continue;
    }
    if (IsPurelyExpanding(to_process)) {
      // Acceptable leaf; its operands need not be inspected.
      continue;
    }
    if (IsFusionCandidate(to_process)) {
      // Fusible interior node: recurse into its operands.
      for (auto* op : to_process->operands()) {
        worklist.push_back(op);
      }
      continue;
    }
    // Neither a leaf nor a fusion candidate — not sinkable.
    return false;
  }
  return true;
}
// Wraps `while_operand` into a loop fusion and greedily pulls its operand
// graph into it: first all non-expanding producers
// (elementwise/reshape/transpose), then the purely expanding leaves
// (broadcast/iota/scalar constant). Returns the new fusion instruction.
HloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion(
    HloInstruction* while_operand) {
  HloInstruction* fusion =
      while_operand->AddInstruction(while_operand->CreateFusion(
          while_operand->shape(), HloInstruction::FusionKind::kLoop,
          while_operand));
  bool did_fuse = IsFusionCandidate(while_operand);
  // Phase 1: fuse every operand that is NOT purely expanding. Restart the
  // scan after each fusion since the operand list changes.
  while (did_fuse) {
    did_fuse = false;
    for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
      HloInstruction* op = fusion->mutable_operand(i);
      if (IsPurelyExpanding(op)) {
        continue;
      }
      fusion->FuseInstruction(op);
      did_fuse = true;
      break;
    }
  }
  // Phase 2: fuse the remaining purely expanding leaves.
  did_fuse = true;
  while (did_fuse) {
    did_fuse = false;
    for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {
      HloInstruction* op = fusion->mutable_operand(i);
      if (IsPurelyExpanding(op)) {
        fusion->FuseInstruction(op);
        did_fuse = true;
        break;
      }
    }
  }
  return fusion;
}
// Attempts to sink fusible producers of loop-invariant init-tuple elements
// of `while_instr` into the loop body, so the values are recomputed inside
// the loop instead of being threaded through the loop state. Returns true
// if anything changed.
absl::StatusOr<bool> WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop(
    HloInstruction* while_instr) {
  HloComputation* while_cond = while_instr->while_condition();
  HloComputation* while_body = while_instr->while_body();
  // Don't rewrite a body/condition that is shared by several while loops.
  if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) {
    return false;
  }
  HloInstruction* init_value = while_instr->mutable_operand(0);
  if (init_value->opcode() != HloOpcode::kTuple) {
    return false;
  }
  bool changed = false;
  // NOTE(review): computed but not referenced later in this function.
  absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>>
      conditional_gte_index_to_insts =
          WhileUtil::GetGTEsMapForWhileConditional(*while_cond);
  std::vector<HloInstruction*> invariant_body_gtes =
      WhileUtil::GetInvariantGTEsForWhileBody(*while_body);
  std::vector<int64_t> tuple_indices;
  std::vector<HloInstruction*> new_operands;
  for (HloInstruction* invariant_body_gte : invariant_body_gtes) {
    int64_t index = invariant_body_gte->tuple_index();
    // Earlier iterations can empty out the tuples; nothing left to do then.
    if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) {
      CHECK_EQ(while_instr->user_count(), 0);
      VLOG(3) << "Each element in the operand tuple of the while instruction '"
              << while_instr->name()
              << "' was an invariant value, whose usage has been replaced "
                 " directly by the value.";
      break;
    }
    HloInstruction* invariant_value = init_value->mutable_operand(index);
    // Skip values that are sliced/gathered in the body: sinking would force
    // recomputing the full value only to take a small piece of it.
    if (absl::c_any_of(invariant_body_gte->users(),
                       [](const HloInstruction* use) {
                         switch (use->opcode()) {
                           case HloOpcode::kDynamicSlice:
                           case HloOpcode::kGather:
                           case HloOpcode::kSlice:
                             return true;
                           default:
                             return false;
                         }
                       })) {
      continue;
    }
    // Clone the init tuple if it has other users (or is the root) so it can
    // be mutated below without affecting them.
    if (init_value->IsRoot() || init_value->user_count() > 1) {
      init_value = init_value->AddInstruction(init_value->Clone());
      TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value));
    }
    if (!IsSinkableFusion(invariant_value)) {
      continue;
    }
    HloInstruction* fusion = CreateSinkableFusion(invariant_value);
    changed = true;
    if (fusion->operand_count() > 0 &&
        (while_instr->IsRoot() ||
         absl::c_any_of(while_instr->users(), [&](HloInstruction* use) {
           return use->opcode() != HloOpcode::kGetTupleElement;
         }))) {
      // The while result is consumed as a whole tuple (root or non-GTE
      // users). Re-materialize it as per-element GTEs + a fresh tuple so the
      // while's shape can be widened below without breaking those users.
      auto uses = while_instr->users();
      std::vector<HloInstruction*> gtes(init_value->operand_count());
      for (int64_t i = 0; i < gtes.size(); ++i) {
        gtes[i] = while_instr->AddInstruction(
            HloInstruction::CreateGetTupleElement(while_instr, i));
      }
      HloInstruction* tuple =
          while_instr->AddInstruction(HloInstruction::CreateTuple(gtes));
      if (while_instr->IsRoot()) {
        while_instr->parent()->set_root_instruction(tuple);
      }
      if (!uses.empty()) {
        TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple));
      }
    }
    // Outside the loop, GTEs of this invariant output can read the original
    // value directly.
    absl::InlinedVector<HloInstruction*, 2> invariant_output_uses;
    for (auto use : while_instr->users()) {
      if (use->opcode() == HloOpcode::kGetTupleElement &&
          use->tuple_index() == index) {
        invariant_output_uses.push_back(use);
      }
    }
    for (auto use : invariant_output_uses) {
      TF_RETURN_IF_ERROR(
          while_instr->parent()->ReplaceInstruction(use, invariant_value));
    }
    HloInstruction* root = while_body->root_instruction();
    HloInstruction* parameter = while_body->parameter_instruction(0);
    // NOTE(review): tuple_indices is resized but otherwise unused here.
    tuple_indices.resize(fusion->operand_count());
    int64_t next_index = init_value->operand_count();
    new_operands.resize(fusion->operand_count());
    // Thread each fusion operand through the loop state: append it to the
    // init tuple, the body parameter shape, and the body root tuple, and
    // create a body GTE that reads it.
    for (int64_t i = 0; i < fusion->operand_count(); ++i) {
      init_value->AppendOperand(fusion->mutable_operand(i));
      parameter->mutable_shape()->mutable_tuple_shapes()->push_back(
          fusion->mutable_operand(i)->shape());
      new_operands[i] = root->AddInstruction(
          HloInstruction::CreateGetTupleElement(parameter, next_index++));
      root->AppendOperand(new_operands[i]);
    }
    // Propagate the widened tuple shape everywhere it must stay consistent.
    *(init_value->mutable_shape()) = parameter->shape();
    *(while_instr->mutable_shape()) = parameter->shape();
    *(while_cond->parameter_instruction(0)->mutable_shape()) =
        parameter->shape();
    *(root->mutable_shape()) = parameter->shape();
    // Clone the fusion into the body over the new GTEs, substitute it for
    // the invariant GTE, then defuse so the ops become plain HLO again.
    auto cloned_fusion = while_body->AddInstruction(
        fusion->CloneWithNewOperands(fusion->shape(), new_operands));
    TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion));
    TF_RETURN_IF_ERROR(
        while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion));
    TF_RETURN_IF_ERROR(cloned_fusion->Defuse());
  }
  return changed;
}
// Pass entry point: collects every while loop outside fusion computations,
// records how many loops share each body/condition computation (used by
// TrySinkingFusiblesIntoWhileLoop to skip shared computations), then
// attempts the sinking rewrite on each loop.
absl::StatusOr<bool> WhileLoopFusibleSinking::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  call_counts_.clear();
  std::vector<HloInstruction*> loops;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kWhile) {
        loops.push_back(instruction);
      }
    }
  }
  for (HloInstruction* loop : loops) {
    ++call_counts_[loop->while_body()];
    ++call_counts_[loop->while_condition()];
  }
  bool changed = false;
  for (HloInstruction* loop : loops) {
    TF_ASSIGN_OR_RETURN(bool sank, TrySinkingFusiblesIntoWhileLoop(loop));
    changed |= sank;
  }
  return changed;
}
} | #include "xla/service/while_loop_fusible_sinking.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using WhileLoopFusibleSinkingTest = HloTestBase;
// An iota fed into the while init should be sunk into the loop body: after
// the pass the body's add consumes a freshly materialized iota instead of a
// loop-carried value.
TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] parameter(0)
const_1 = f32[2] iota(), iota_dimension=0
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  // The add now reads a recomputed iota inside the body.
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(_, op::Iota()), _));
}
// A whole fusible expression (iota + iota -> add, broadcast, multiply) fed
// into the init should be sunk; only its non-expanding root inputs (the
// broadcast operand) remain threaded through the widened loop state.
TEST_F(WhileLoopFusibleSinkingTest, SinkMask) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  // The mask expression is recomputed inside the body; the body root tuple
  // gained an extra element for the threaded-through operand.
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),
                                                op::Broadcast())),
                        _, _));
}
// Negative case: the body dynamic-slices the loop-carried mask, so sinking
// is skipped (the pass refuses invariant values whose body GTE feeds
// DynamicSlice/Gather/Slice) and the module is unchanged.
TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
z = s32[] constant(0)
j = s32[] constant(3)
ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}
r = f32[7] reshape(ds)
b = f32[5,7] broadcast(r), dimensions={1}
a = add(b, p_body.0)
add.0 = add(a, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  EXPECT_FALSE(changed);
}
}
} | absl::StatusOr<bool> WhileLoopFusibleSinking::Run(
    // Pass entry point: collect all while loops, count how many loops share
    // each body/condition computation, then try the sinking rewrite on each.
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  call_counts_.clear();
  bool changed = false;
  std::vector<HloInstruction*> while_instrs;
  for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
    // Gather every kWhile instruction in this computation.
    absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
                    HloPredicateIsOp<HloOpcode::kWhile>);
  }
  for (HloInstruction* while_instr : while_instrs) {
    // Shared bodies/conditions (count > 1) are skipped by the rewrite.
    call_counts_[while_instr->while_body()]++;
    call_counts_[while_instr->while_condition()]++;
  }
  for (HloInstruction* while_instr : while_instrs) {
    TF_ASSIGN_OR_RETURN(bool result,
                        TrySinkingFusiblesIntoWhileLoop(while_instr));
    changed |= result;
  }
  return changed;
}
} | TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {
  // An iota in the while init is expected to be sunk into the loop body and
  // recomputed there.
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2],f32[2]) parameter(0)
p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
add.0 = f32[2] add(p_body.0, p_body.1)
ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[2],f32[2]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[2] parameter(0)
const_1 = f32[2] iota(), iota_dimension=0
while_init = (f32[2],f32[2]) tuple(const_0, const_1)
ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  // The body's add now consumes a freshly materialized iota.
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(_, op::Iota()), _));
}
// The fused mask expression (iota/add/broadcast/multiply) in the init is
// expected to be sunk and recomputed inside the loop body.
TEST_F(WhileLoopFusibleSinkingTest, SinkMask) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
add.0 = add(p_body.0, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  ASSERT_TRUE(changed);
  auto* while_body = module->GetComputationWithName("body");
  // Mask recomputed in-body; the root tuple gained a threaded-through
  // element for the broadcast operand.
  EXPECT_THAT(while_body->root_instruction(),
              op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),
                                                op::Broadcast())),
                        _, _));
}
// Negative case: the body dynamic-slices the carried mask, which blocks
// sinking (DynamicSlice/Gather/Slice users are skipped by the pass).
TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {
  const char* const hlo_string = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[5,7],f32[5,7]) parameter(0)
p_body.0 = get-tuple-element(p_body), index=0
p_body.1 = get-tuple-element(p_body), index=1
z = s32[] constant(0)
j = s32[] constant(3)
ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}
r = f32[7] reshape(ds)
b = f32[5,7] broadcast(r), dimensions={1}
a = add(b, p_body.0)
add.0 = add(a, p_body.1)
ROOT root = tuple(add.0, p_body.1)
}
condition {
p_cond = (f32[5,7],f32[5,7]) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const_0 = f32[5,7] parameter(0)
p = f32[5] parameter(1)
a = f32[5,7] iota(), iota_dimension=0
b = f32[5,7] iota(), iota_dimension=1
c = add(a, b)
d = f32[5,7] broadcast(p), dimensions={0}
mask = multiply(c,d)
while_init = tuple(const_0, mask)
ROOT while = while(while_init), condition=condition, body=body
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          WhileLoopFusibleSinking{}.Run(module.get()));
  EXPECT_FALSE(changed);
}