Unnamed: 0
int64 0
409
| Code
stringlengths 131
27.3k
| Unit Test
stringlengths 89
30.5k
|
---|---|---|
0 | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_API_V1_COMPILE_MLIR_UTIL_H_
#include <memory>
#include "absl/base/attributes.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_argument.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status ConvertMLIRToXlaComputation(
mlir::ModuleOp module_op, llvm::StringRef device_type,
xla::XlaComputation* xla_computation, bool use_tuple_args,
bool enable_op_fallback, bool return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns =
{},
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef());
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo = true, bool allow_partial_conversion = false);
// Shape of one compilation argument plus a flag telling whether the argument
// is a resource variable (in which case `shape` describes the resource's
// value, not the handle).
struct TensorOrResourceShape {
TensorShape shape;
bool is_resource = false;
};
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status BuildHloFromTf(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED("Not meant to be used directly and should be a util.")
Status PopulateResultIOInfo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args, bool use_resource_updates_for_aliases,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileMlirToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
absl::StatusOr<std::string> CompileSerializedMlirToXlaHlo(
llvm::StringRef mlir_module_string, llvm::ArrayRef<TensorShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {},
llvm::StringRef module_name = llvm::StringRef(),
bool lower_to_xla_hlo = true);
ABSL_DEPRECATED("Use v2/legalize_tf.h::LegalizeMlirToHlo instead.")
Status CompileGraphToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes);
ABSL_DEPRECATED(
"Use v1/compile_tf_graph.h::CompileTensorflowGraphToHlo instead.")
Status BuildHloFromGraph(
const Graph& graph, xla::XlaBuilder& builder,
mlir::MLIRContext& mlir_context, llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns, bool unconditionally_use_output_shapes,
llvm::ArrayRef<XlaArgument> args, llvm::ArrayRef<std::string> control_rets,
llvm::StringRef device_type, const FunctionLibraryDefinition& flib_def,
const GraphDebugInfo& debug_info,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes = {});
// Canonical error returned when compilation is aborted after graph analysis;
// kept inline in the header so every TU produces the same status message.
static inline Status CompileToHloGraphAnalysisFailedError() {
return errors::Internal("disabled after graph analysis");
}
void RegisterConvertMlirToXlaHloPipelineWithDefaults();
}
#endif
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <memory>
#include <string>
#include <variant>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/shape.h"
#include "xla/translate/mhlo_to_hlo/layout_util.h"
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kGroupSizeAttrName =
"tf2xla.collective_info.group_size";
constexpr absl::string_view kGroupKeyAttrName =
"tf2xla.collective_info.group_key";
// Extracts the argument's shape as a TensorShape. `XlaArgument::shape` is a
// variant holding either an xla::Shape (converted here) or a TensorShape
// (returned as-is).
absl::StatusOr<TensorShape> GetTensorShapeFromXlaArgument(
    const XlaArgument& arg) {
  // Consistency fix: the original queried the variant with
  // absl::holds_alternative but read it with std::get; use the std::
  // accessors throughout.
  if (std::holds_alternative<xla::Shape>(arg.shape)) {
    TensorShape arg_shape;
    TF_RETURN_IF_ERROR(
        XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &arg_shape));
    return arg_shape;
  }
  return std::get<TensorShape>(arg.shape);
}
// If `sharding` (an "mhlo.sharding" string attribute) is present, decodes it
// and rewrites `shape`'s layout to match the sharded shape. A null attribute
// is a no-op; an unparsable attribute is an InvalidArgument error.
Status MaybeRewriteLayoutWithShardedShape(
mlir::StringAttr sharding,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* shape) {
if (!sharding) return absl::OkStatus();
xla::OpSharding op_sharding;
if (tensorflow::DecodeShardingAttribute(sharding, op_sharding).failed()) {
return errors::InvalidArgument("failed to parse sharding '",
sharding.getValue().str(), "'");
}
std::optional<xla::HloSharding> hlo_sharding;
TF_ASSIGN_OR_RETURN(hlo_sharding, xla::HloSharding::FromProto(op_sharding));
// `false` here disables fast-memory layout rewriting — TODO confirm against
// RewriteLayoutWithShardedShape's parameter list.
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
hlo_sharding, false, shape_determination_fns, shape));
return absl::OkStatus();
}
// Computes the XLA input shapes for the "main" function of `module`: one
// shape per argument, derived from `arg_shapes` plus the MLIR argument dtype,
// then layout-adjusted by any per-argument "mhlo.sharding" attribute. When
// `use_tuple_args` is set, all shapes are wrapped into a single tuple shape.
Status GetXlaInputShapes(
mlir::ModuleOp module, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
std::vector<xla::Shape>* xla_input_shapes) {
xla_input_shapes->clear();
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
TF_RET_CHECK(main_func != nullptr) << "No main function found";
mlir::FunctionType func_type = main_func.getFunctionType();
int num_args = func_type.getNumInputs();
xla_input_shapes->reserve(num_args);
std::vector<xla::Shape> individual_arg_shapes;
individual_arg_shapes.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
individual_arg_shapes.emplace_back();
xla::Shape& xla_shape = individual_arg_shapes.back();
// Element type comes from the MLIR signature; the dimensions come from
// the caller-supplied arg_shapes.
DataType arg_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(func_type.getInput(i), &arg_dtype));
auto layout_preference = shape_determination_fns.layout_preference_fn(
arg_shapes[i].shape, arg_dtype, std::nullopt);
TF_ASSIGN_OR_RETURN(xla_shape,
shape_determination_fns.shape_representation_fn(
arg_shapes[i].shape, arg_dtype,
false, layout_preference));
// Sharded arguments may need a different layout than the default.
auto sharding =
main_func.getArgAttrOfType<mlir::StringAttr>(i, "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &xla_shape));
}
if (use_tuple_args) {
xla_input_shapes->push_back(
xla::ShapeUtil::MakeTupleShape(individual_arg_shapes));
} else {
*xla_input_shapes = individual_arg_shapes;
}
return absl::OkStatus();
}
// Returns a static "buffer" tensor type for `ty`: any dynamic dimension is
// replaced by its bound from an mhlo TypeExtensionsAttr encoding when one is
// attached. Returns a null type when `ty` is not a RankedTensorType.
mlir::RankedTensorType GetBufferType(mlir::Type ty) {
  auto tensor_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
  if (!tensor_ty) return {};

  llvm::SmallVector<int64_t, 4> shape = llvm::to_vector<4>(tensor_ty.getShape());
  auto bounds_attr = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
      tensor_ty.getEncoding());
  if (bounds_attr && !bounds_attr.getBounds().empty()) {
    // Substitute each dynamic extent with its declared upper bound.
    const int64_t num_dims = tensor_ty.getRank();
    for (int64_t i = 0; i < num_dims; ++i) {
      if (shape[i] == mlir::ShapedType::kDynamic) {
        shape[i] = bounds_attr.getBounds()[i];
      }
    }
  }
  return GetTypeFromTFTensorShape(shape, tensor_ty.getElementType());
}
// Populates output metadata for the "main" function of `module`:
// - `xla_output_shape`: tuple of all result shapes (layout-adjusted),
// - `outputs`: one XlaOutputDescription per non-aliased result,
// - `resource_updates`: one XlaResourceUpdate per result aliased to an input
//   (only when `use_resource_updates_for_aliases` is set).
// Dynamic result dimensions must be bounded (via tensor.cast to a bounded
// type); otherwise an InvalidArgument error is returned.
Status GetOutputInfo(
mlir::ModuleOp module, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* xla_output_shape, std::vector<XlaOutputDescription>* outputs,
std::vector<XlaResourceUpdate>* resource_updates) {
// Round-trips an xla::Shape through TensorShape/DataType so the
// shape-determination callbacks can pick the representation layout.
auto shape_representation_fn_no_fast_memory =
[shape_determination_fns](
const xla::Shape& xla_shape) -> absl::StatusOr<xla::Shape> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
auto layout_preference = shape_determination_fns.layout_preference_fn(
shape, dtype, std::nullopt);
return shape_determination_fns.shape_representation_fn(
shape, dtype, false, layout_preference);
};
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
mlir::FunctionType func_type = main_func.getFunctionType();
outputs->clear();
outputs->reserve(func_type.getNumResults());
resource_updates->clear();
resource_updates->reserve(func_type.getNumResults());
std::vector<xla::Shape> shapes;
shapes.reserve(func_type.getNumResults());
// Map output index -> input index for arguments carrying the
// "tf.aliasing_output" attribute.
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
for (unsigned i = 0; i < main_func.getNumArguments(); ++i)
if (auto aliasing_output = main_func.getArgAttrOfType<mlir::IntegerAttr>(
i, "tf.aliasing_output"))
output_to_input_alias[aliasing_output.getInt()] = i;
auto return_op = main_func.begin()->getTerminator();
for (const auto& type_and_idx : llvm::enumerate(func_type.getResults())) {
size_t idx = type_and_idx.index();
auto result_ty = mlir::cast<mlir::RankedTensorType>(type_and_idx.value());
// For a dynamically-shaped result, try to recover a bounded static type
// from the tensor.cast feeding the return.
mlir::RankedTensorType buffer_ty = result_ty;
if (!buffer_ty.hasStaticShape()) {
mlir::Value return_val = return_op->getOperand(idx);
if (auto owner = mlir::dyn_cast_or_null<mlir::tensor::CastOp>(
return_val.getDefiningOp())) {
buffer_ty = GetBufferType(owner.getOperand().getType());
if (!buffer_ty || !buffer_ty.hasStaticShape()) {
return errors::InvalidArgument(
"results needs to be static or bounded");
}
}
}
xla::Shape shape = xla::TypeToShape(buffer_ty);
if (shape.element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return errors::InvalidArgument("XLA conversion failed for MLIR type.");
}
TF_ASSIGN_OR_RETURN(shape, shape_representation_fn_no_fast_memory(shape));
// Re-mark dimensions that were dynamic in the original result type.
if (!result_ty.hasStaticShape()) {
int64_t rank = result_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (result_ty.isDynamicDim(dim)) {
shape.set_dynamic_dimension(dim, true);
}
}
}
auto sharding = main_func.getResultAttrOfType<mlir::StringAttr>(
type_and_idx.index(), "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &shape));
auto tensor_type =
mlir::dyn_cast<mlir::RankedTensorType>(type_and_idx.value());
shapes.push_back(shape);
// Aliased results become resource updates instead of plain outputs when
// requested by the caller.
auto it = output_to_input_alias.find(type_and_idx.index());
if (it != output_to_input_alias.end() && use_resource_updates_for_aliases) {
resource_updates->emplace_back();
XlaResourceUpdate& resource_update = resource_updates->back();
resource_update.input_index = it->getSecond();
resource_update.modified = true;
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &resource_update.type));
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &resource_update.shape));
continue;
}
outputs->emplace_back();
XlaOutputDescription& out_desc = outputs->back();
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &out_desc.type));
out_desc.is_constant = false;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &out_desc.shape));
// -1 marks an output that is not aliased to any input.
out_desc.input_index =
it != output_to_input_alias.end() ? it->getSecond() : -1;
out_desc.is_tensor_list = false;
}
*xla_output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
return absl::OkStatus();
}
// Fills `input_mapping` with the identity mapping [0, num_inputs): MLIR-based
// compilation passes every input through unchanged.
void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) {
  input_mapping->resize(num_inputs);
  for (int idx = 0; idx < num_inputs; ++idx) {
    (*input_mapping)[idx] = idx;
  }
}
// Registers every dialect the pipeline may encounter: TF (plus executor etc.),
// MHLO-family dialects, and StableHLO.
static void RegisterDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
}
// Post-legalization inlining is only enabled for the TPU XLA JIT device.
bool CanInlineFunctionsPostLegalization(llvm::StringRef device_type) {
return device_type == DEVICE_TPU_XLA_JIT;
}
// Appends the TF->MHLO legalization passes to `pm`. When `lower_to_xla_hlo`
// is false only the infeed layout adjustment runs, leaving the module in TF
// dialect form. Pass order is load-bearing; do not reorder.
void AddLegalizationPasses(mlir::OpPassManager& pm, bool legalize_chlo,
llvm::StringRef device_type, bool enable_op_fallback,
bool lower_to_xla_hlo) {
if (lower_to_xla_hlo) {
mlir::quant::stablehlo::AddQuantizationLoweringPasses(pm);
pm.addPass(mlir::mhlo::createLegalizeTFPass(
legalize_chlo,
device_type, enable_op_fallback));
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateInfeedsOpsXlaAdjustLayoutPass());
if (lower_to_xla_hlo) {
// Clean up and re-infer shapes after legalization.
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
}
}
}
// Builds the full TF -> XLA HLO lowering pipeline on `pm`. The sequence is
// order-dependent: control-flow functionalization and inlining first, then
// shape inference / decomposition of stateful ops, then legalization, then
// verification. `lower_to_xla_hlo=false` skips the lowering-specific passes
// so the output remains TF dialect.
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo, bool allow_partial_conversion) {
bool legalize_chlo = true;
pm.addNestedPass<mlir::func::FuncOp>(
tensorflow::tf2xla::internal::CreateInputLoweringMetricsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateTFXLADeviceSpecificTransformsPass(device_type));
// Convert functional control flow to region form before inlining.
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateReplicateTensorListInitOpsPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
// SCCP runs twice: before and after shape inference / one-use duplication.
pm.addPass(mlir::createSCCPPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(mlir::createSCCPPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
}
pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::TF::CreateLowerQuantizedPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::CreateConvertTFQuantTypesPass());
if (lower_to_xla_hlo) {
// Caller-supplied passes run just before the collective lowering.
for (auto& target_pass : custom_legalization_passes) {
pm.addNestedPass<mlir::func::FuncOp>(std::move(target_pass));
}
pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());
}
AddLegalizationPasses(pm, legalize_chlo, device_type, enable_op_fallback,
lower_to_xla_hlo);
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());
// Verification is skipped when the caller explicitly allows a partial
// conversion.
if (!allow_partial_conversion) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(legalize_chlo));
}
}
if (CanInlineFunctionsPostLegalization(device_type)) {
pm.addPass(mlir::createInlinerPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
}
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module) {
auto producer_or = GetTfGraphProducerVersion(module);
if (!producer_or.ok()) ret | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_builder.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::OpPassManager;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::HasSubstr;
// Well-formed TF MLIR module (single constant-returning main) used as the
// happy-path input for the legalization tests below.
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
// A valid TF module legalizes successfully and the constant is lowered to
// mhlo.const in the resulting HLO text.
TEST(LegalizeMlirTest, LegalizesModule) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("mhlo.const"));
}
// An unknown op fails legalization and is recorded (op name + "Unknown")
// in the failed-legalization streamz counter.
TEST(LegalizeMlirTest, FailsLegalizesModule) {
constexpr char failed_legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
failed_legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_FALSE(status.ok());
EXPECT_EQ(count.Delta("tf.DoesntExist", "Unknown"), 1);
}
// Sanity check: building the pipeline registers at least one pass.
TEST(CompileMlirUtil, CreatesPipeline) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{});
EXPECT_FALSE(pass_manager.getPasses().empty());
}
// With lowering enabled (the default), the textual pipeline contains the
// xla-legalize-tf pass.
TEST(CompileMlirUtil, HasLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kLegalizeTfPass));
}
// With lower_to_xla_hlo=false the legalization pass must be absent from the
// pipeline.
TEST(CompileMlirUtil, DoesNotHaveLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{},
false);
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, Not(HasSubstr(kLegalizeTfPass)));
}
// When lowering is disabled the output still contains the TF-dialect op
// (tf.Const) rather than its MHLO equivalent.
TEST(CompileMlirUtil, DoesNotLowerWhenTold) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result,
{},
"",
false);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("tf.Const"));
}
// Pins the inliner's configuration string: canonicalize must be the explicit
// default-pipeline so inlining behavior doesn't silently change.
TEST(CompileMlirUtil, CanonicalizationIsExplicitDuringInlining) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kInlinePass =
"inline{default-pipeline=canonicalize "
"inlining-threshold=4294967295 max-iterations=4 }";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kInlinePass));
}
// A bounded dynamic shape (mhlo.type_extensions bounds) legalizes without
// error.
TEST(LegalizeMlirTest, LegalizesModuleWithDynamicShape) {
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
std::vector<tensorflow::TensorShape> arg_shapes = {{1}};
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result);
EXPECT_TRUE(status.ok());
}
// Builds a single-node Const graph whose _output_shapes attribute ([1])
// deliberately disagrees with the real value shape (2x3x4x5), so tests can
// observe which shape the compiler trusts.
absl::StatusOr<std::unique_ptr<Graph>> BuildOpGraphWithOutputShapes() {
DataType data_type = DT_INT32;
std::initializer_list<int64_t> dims = {2, 3, 4, 5};
Tensor tensor(data_type, TensorShape(dims));
for (int i = 0; i < 2 * 3 * 4 * 5; ++i) {
tensor.flat<int32>()(i) = i;
}
NodeDef node;
auto builder = NodeDefBuilder("some_node", "Const")
.Attr("dtype", data_type)
.Attr("value", tensor);
// Mismatched shape attribute: claims a [1] output.
AttrValue shape_attr;
TensorShapeProto* shape_proto = shape_attr.mutable_list()->add_shape();
shape_proto->add_dim()->set_size(1);
builder.Attr("_output_shapes", shape_attr);
TF_RETURN_IF_ERROR(builder.Finalize(&node));
return CreateSingleOpGraph(node, {}, {DataType::DT_INT32});
}
// Test helper: runs the production BuildHloFromGraph on `graph` with minimal
// arguments, toggling only whether _output_shapes is honored.
absl::Status BuildHloFromGraph(Graph& graph, bool use_output_shapes) {
xla::XlaBuilder builder(
::testing::UnitTest::GetInstance()->current_test_info()->name());
mlir::MLIRContext mlir_context;
llvm::SmallVector<xla::XlaOp, 4> xla_params;
std::vector<xla::XlaOp> returns(1);
return BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
use_output_shapes, {},
{}, DEVICE_TPU,
FunctionLibraryDefinition(OpRegistry::Global()),
{},
{});
}
// Ignoring _output_shapes keeps the Const's true shape, so HLO building
// succeeds.
TEST(CompileMlirUtil, UsesCorrectOriginalShapeWithoutOutputShapes) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, false);
TF_ASSERT_OK(build_result);
}
// Honoring the (wrong) _output_shapes attribute produces an incompatible
// cast and a build failure with a specific diagnostic.
TEST(CompileMlirUtil, UsesIncorrectOutputShapesWhenPresent) {
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildOpGraphWithOutputShapes());
auto build_result = BuildHloFromGraph(*graph, true);
ASSERT_FALSE(build_result.ok());
EXPECT_THAT(build_result.message(),
HasSubstr("op operand type 'tensor<2x3x4x5xi32>' and result type "
"'tensor<1xi32>' are cast incompatible"));
}
}
} |
1 | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_XNNPACK_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
extern "C" {
// Creates an XNNPack delegate configured from `settings`, which is expected
// to point at a ::tflite::TFLiteSettings flatbuffer. Missing settings (null
// pointer or absent xnnpack_settings table) fall back to the defaults from
// TfLiteXNNPackDelegateOptionsDefault().
static TfLiteDelegate* CreateDelegate(const void* settings) {
  const ::tflite::TFLiteSettings* tflite_settings =
      static_cast<const ::tflite::TFLiteSettings*>(settings);
  auto options(TfLiteXNNPackDelegateOptionsDefault());
  // Robustness fix: guard against a null settings pointer instead of
  // dereferencing it (this is a C plugin entry point given an opaque void*).
  const auto* xnnpack_settings =
      tflite_settings != nullptr ? tflite_settings->xnnpack_settings()
                                 : nullptr;
  if (xnnpack_settings) {
    options.num_threads = xnnpack_settings->num_threads();
    // A flags value of 0 means "unset"; keep the default flags in that case.
    if (xnnpack_settings->flags()) {
      options.flags = xnnpack_settings->flags();
    }
    // The weight-cache path must outlive the delegate; c_str() points into
    // the caller's flatbuffer.
    if (xnnpack_settings->experimental_weight_cache_file_path()) {
      options.experimental_weight_cache_file_path =
          xnnpack_settings->experimental_weight_cache_file_path()->c_str();
    }
  }
  return TfLiteXNNPackDelegateCreate(&options);
}
// Destroys a delegate previously returned by CreateDelegate.
static void DestroyDelegate(TfLiteDelegate* delegate) {
TfLiteXNNPackDelegateDelete(delegate);
}
// The XNNPack delegate does not report per-delegate error codes; always 0.
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
// Plugin vtable binding the static functions above to the generic
// TfLiteDelegatePlugin C interface (create / destroy / get_delegate_errno).
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
// Returns the singleton descriptor for the XNNPack delegate plugin.
const TfLiteDelegatePlugin* TfLiteXnnpackDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/xnnpack_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
// Fixture that builds a TFLiteSettings flatbuffer with an XNNPackSettings
// table (num_threads = kNumThreadsForTest) and exposes it via `settings_`.
class XnnpackTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
void SetUp() override {
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
// settings_ points into flatbuffer_builder_'s buffer; the builder must
// stay alive for the duration of each test.
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~XnnpackTest() override = default;
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
constexpr int XnnpackTest::kNumThreadsForTest;
// create() returns a non-null delegate that destroy() can release.
TEST_F(XnnpackTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
// get_delegate_errno() always reports 0 for this plugin.
TEST_F(XnnpackTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteXnnpackDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
// num_threads from the settings propagates to the delegate's pthreadpool.
TEST_F(XnnpackTest, SetsCorrectThreadCount) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
pthreadpool_t threadpool =
static_cast<pthreadpool_t>(TfLiteXNNPackDelegateGetThreadPool(delegate));
int thread_count = pthreadpool_get_threads_count(threadpool);
EXPECT_EQ(thread_count, kNumThreadsForTest);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
// With no flags in the settings, the delegate keeps the default flags.
TEST_F(XnnpackTest, UsesDefaultFlagsByDefault) {
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, TfLiteXNNPackDelegateOptionsDefault().flags);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
// A nonzero flags value in the settings overrides the default flags.
TEST_F(XnnpackTest, UsesSpecifiedFlagsWhenNonzero) {
// Rebuild the settings flatbuffer with an explicit flags value.
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_flags(
tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
TfLiteDelegate *delegate =
TfLiteXnnpackDelegatePluginCApi()->create(settings_);
int flags = TfLiteXNNPackDelegateGetFlags(delegate);
EXPECT_EQ(flags, tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
TfLiteXnnpackDelegatePluginCApi()->destroy(delegate);
}
TEST_F(XnnpackTest, UsesDefaultFlagsWhenZero) {
  // An explicit zero (NO_FLAGS) setting must behave like "unset": the
  // delegate substitutes the default flags.
  XNNPackSettingsBuilder xnnpack_builder(flatbuffer_builder_);
  xnnpack_builder.add_flags(
      tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS);
  flatbuffers::Offset<XNNPackSettings> xnnpack_offset =
      xnnpack_builder.Finish();
  TFLiteSettingsBuilder settings_builder(flatbuffer_builder_);
  settings_builder.add_xnnpack_settings(xnnpack_offset);
  flatbuffer_builder_.Finish(settings_builder.Finish());
  settings_ = flatbuffers::GetRoot<TFLiteSettings>(
      flatbuffer_builder_.GetBufferPointer());
  TfLiteDelegate *created_delegate =
      TfLiteXnnpackDelegatePluginCApi()->create(settings_);
  EXPECT_EQ(TfLiteXNNPackDelegateGetFlags(created_delegate),
            TfLiteXNNPackDelegateOptionsDefault().flags);
  TfLiteXnnpackDelegatePluginCApi()->destroy(created_delegate);
}
} |
2 | #ifndef QUICHE_QUIC_CORE_CRYPTO_CERTIFICATE_UTIL_H_
#define QUICHE_QUIC_CORE_CRYPTO_CERTIFICATE_UTIL_H_
#include <string>
#include "absl/strings/string_view.h"
#include "openssl/evp.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
// Broken-down timestamp for a certificate validity bound. CBBAddTime()
// serializes it with a trailing 'Z', i.e. as a UTC time.
struct QUICHE_EXPORT CertificateTimestamp {
  uint16_t year;  // Four-digit year (e.g. 2024).
  uint8_t month;
  uint8_t day;
  uint8_t hour;
  uint8_t minute;
  uint8_t second;
};
// Parameters for CreateSelfSignedCertificate().
struct QUICHE_EXPORT CertificateOptions {
  // Comma-separated distinguished name, e.g. "CN=example,O=org"; only the
  // CN, C, O and OU attribute types are understood (see AddName()).
  absl::string_view subject;
  uint64_t serial_number;
  CertificateTimestamp validity_start;
  CertificateTimestamp validity_end;
};
// Generates a fresh ECDSA P-256 key pair; returns nullptr on failure.
QUICHE_EXPORT bssl::UniquePtr<EVP_PKEY> MakeKeyPairForSelfSignedCertificate();
// DER-encodes a self-signed certificate for |key| (must be ECDSA P-256)
// using |options|; returns an empty string on failure.
QUICHE_EXPORT std::string CreateSelfSignedCertificate(
    EVP_PKEY& key, const CertificateOptions& options);
}
#endif
#include "quiche/quic/core/crypto/certificate_util.h"
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "openssl/bn.h"
#include "openssl/bytestring.h"
#include "openssl/digest.h"
#include "openssl/ec_key.h"
#include "openssl/mem.h"
#include "openssl/pkcs7.h"
#include "openssl/pool.h"
#include "openssl/rsa.h"
#include "openssl/stack.h"
#include "quiche/quic/core/crypto/boring_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
// Appends an AlgorithmIdentifier SEQUENCE for ecdsa-with-SHA256 to |cbb|.
bool AddEcdsa256SignatureAlgorithm(CBB* cbb) {
  // DER encoding of OID 1.2.840.10045.4.3.2 (ecdsa-with-SHA256).
  static const uint8_t kEcdsaWithSha256[] = {0x2a, 0x86, 0x48, 0xce,
                                             0x3d, 0x04, 0x03, 0x02};
  CBB algorithm, algorithm_oid;
  if (!CBB_add_asn1(cbb, &algorithm, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&algorithm, &algorithm_oid, CBS_ASN1_OBJECT) ||
      !CBB_add_bytes(&algorithm_oid, kEcdsaWithSha256,
                     sizeof(kEcdsaWithSha256))) {
    return false;
  }
  return CBB_flush(cbb);
}
// Parses |name| as a comma-separated DN ("CN=a,O=b,...") and appends the
// corresponding RDNSequence to |cbb|. Returns false on malformed input or
// unknown attribute types.
bool AddName(CBB* cbb, absl::string_view name) {
  // DER-encoded OIDs for the supported X.501 attribute types.
  static const uint8_t kCommonName[] = {0x55, 0x04, 0x03};
  static const uint8_t kCountryName[] = {0x55, 0x04, 0x06};
  static const uint8_t kOrganizationName[] = {0x55, 0x04, 0x0a};
  static const uint8_t kOrganizationalUnitName[] = {0x55, 0x04, 0x0b};
  std::vector<std::string> components =
      absl::StrSplit(name, ',', absl::SkipEmpty());
  if (components.empty()) {
    QUIC_LOG(ERROR) << "Missing DN or wrong format";
    return false;
  }
  // One single-attribute RelativeDistinguishedName per component.
  CBB rdn_sequence;
  if (!CBB_add_asn1(cbb, &rdn_sequence, CBS_ASN1_SEQUENCE)) {
    return false;
  }
  for (const std::string& component : components) {
    std::vector<std::string> kv =
        absl::StrSplit(absl::StripAsciiWhitespace(component), '=');
    if (kv.size() != 2) {
      QUIC_LOG(ERROR) << "Wrong DN format at " + component;
      return false;
    }
    const std::string& attr_type = kv[0];
    const std::string& attr_value = kv[1];
    absl::Span<const uint8_t> oid;
    if (attr_type == "CN") {
      oid = kCommonName;
    } else if (attr_type == "C") {
      oid = kCountryName;
    } else if (attr_type == "O") {
      oid = kOrganizationName;
    } else if (attr_type == "OU") {
      oid = kOrganizationalUnitName;
    } else {
      QUIC_LOG(ERROR) << "Unrecognized type " + attr_type;
      return false;
    }
    // Country names are encoded as PrintableString; everything else as
    // UTF8String.
    const auto value_tag =
        attr_type == "C" ? CBS_ASN1_PRINTABLESTRING : CBS_ASN1_UTF8STRING;
    CBB rdn, attribute, type_cbb, value_cbb;
    if (!CBB_add_asn1(&rdn_sequence, &rdn, CBS_ASN1_SET) ||
        !CBB_add_asn1(&rdn, &attribute, CBS_ASN1_SEQUENCE) ||
        !CBB_add_asn1(&attribute, &type_cbb, CBS_ASN1_OBJECT) ||
        !CBB_add_bytes(&type_cbb, oid.data(), oid.size()) ||
        !CBB_add_asn1(&attribute, &value_cbb, value_tag) ||
        !AddStringToCbb(&value_cbb, attr_value) || !CBB_flush(&rdn_sequence)) {
      return false;
    }
  }
  return CBB_flush(cbb);
}
// Appends |timestamp| to |cbb| as UTCTime (YYMMDDHHMMSSZ) for years
// 1950-2049 and GeneralizedTime (YYYYMMDDHHMMSSZ) otherwise, per RFC 5280.
bool CBBAddTime(CBB* cbb, const CertificateTimestamp& timestamp) {
  CBB child;
  std::string formatted_time;
  const bool use_utc_time = (1950 <= timestamp.year && timestamp.year < 2050);
  if (use_utc_time) {
    if (!CBB_add_asn1(cbb, &child, CBS_ASN1_UTCTIME)) {
      return false;
    }
    // UTCTime carries only the last two digits of the year.
    formatted_time = absl::StrFormat("%02d", timestamp.year % 100);
  } else {
    if (!CBB_add_asn1(cbb, &child, CBS_ASN1_GENERALIZEDTIME)) {
      return false;
    }
    formatted_time = absl::StrFormat("%04d", timestamp.year);
  }
  absl::StrAppendFormat(&formatted_time, "%02d%02d%02d%02d%02dZ",
                        timestamp.month, timestamp.day, timestamp.hour,
                        timestamp.minute, timestamp.second);
  static const size_t kGeneralizedTimeLength = 15;
  static const size_t kUTCTimeLength = 13;
  QUICHE_DCHECK_EQ(formatted_time.size(),
                   use_utc_time ? kUTCTimeLength : kGeneralizedTimeLength);
  return AddStringToCbb(&child, formatted_time) && CBB_flush(cbb);
}
// Appends one X.509 Extension (OID, optional criticality, OCTET STRING
// payload) to the |extensions| sequence.
bool CBBAddExtension(CBB* extensions, absl::Span<const uint8_t> oid,
                     bool critical, absl::Span<const uint8_t> contents) {
  CBB extension, cbb_oid, cbb_contents;
  const bool ok =
      CBB_add_asn1(extensions, &extension, CBS_ASN1_SEQUENCE) &&
      CBB_add_asn1(&extension, &cbb_oid, CBS_ASN1_OBJECT) &&
      CBB_add_bytes(&cbb_oid, oid.data(), oid.size()) &&
      // The critical flag defaults to FALSE, so it is only written when set.
      (!critical || CBB_add_asn1_bool(&extension, 1)) &&
      CBB_add_asn1(&extension, &cbb_contents, CBS_ASN1_OCTETSTRING) &&
      CBB_add_bytes(&cbb_contents, contents.data(), contents.size()) &&
      CBB_flush(extensions);
  return ok;
}
bool IsEcdsa256Key(const EVP_PKEY& evp_key) {
if (EVP_PKEY_id(&evp_key) != EVP_PKEY_EC) {
return false;
}
const EC_KEY* key = EVP_PKEY_get0_EC_KEY(&evp_key);
if (key == nullptr) {
return false;
}
const EC_GROUP* group = EC_KEY_get0_group(key);
if (group == nullptr) {
return false;
}
return EC_GROUP_get_curve_name(group) == NID_X9_62_prime256v1;
}
}
// Generates a fresh ECDSA P-256 key pair, or nullptr on any failure.
bssl::UniquePtr<EVP_PKEY> MakeKeyPairForSelfSignedCertificate() {
  bssl::UniquePtr<EVP_PKEY_CTX> context(
      EVP_PKEY_CTX_new_id(EVP_PKEY_EC, nullptr));
  // Configure the keygen context for the P-256 curve before generating.
  if (context == nullptr || EVP_PKEY_keygen_init(context.get()) != 1 ||
      EVP_PKEY_CTX_set_ec_paramgen_curve_nid(context.get(),
                                             NID_X9_62_prime256v1) != 1) {
    return nullptr;
  }
  EVP_PKEY* generated = nullptr;
  if (EVP_PKEY_keygen(context.get(), &generated) != 1) {
    return nullptr;
  }
  return bssl::UniquePtr<EVP_PKEY>(generated);
}
// Builds a DER-encoded, self-signed X.509 v3 certificate for |key| (which
// must be an ECDSA P-256 key), signed with ECDSA-SHA256. Returns the DER
// bytes on success and an empty string on any failure.
std::string CreateSelfSignedCertificate(EVP_PKEY& key,
                                        const CertificateOptions& options) {
  std::string error;
  if (!IsEcdsa256Key(key)) {
    QUIC_LOG(ERROR) << "CreateSelfSignedCert only accepts ECDSA P-256 keys";
    return error;
  }
  // First serialize the TBSCertificate (RFC 5280, section 4.1).
  bssl::ScopedCBB cbb;
  CBB tbs_cert, version, validity;
  uint8_t* tbs_cert_bytes;
  size_t tbs_cert_len;
  if (!CBB_init(cbb.get(), 64) ||
      !CBB_add_asn1(cbb.get(), &tbs_cert, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1(&tbs_cert, &version,
                    CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) ||
      !CBB_add_asn1_uint64(&version, 2) ||  // X.509 v3 is encoded as 2.
      !CBB_add_asn1_uint64(&tbs_cert, options.serial_number) ||
      !AddEcdsa256SignatureAlgorithm(&tbs_cert) ||  // signature algorithm
      !AddName(&tbs_cert, options.subject) ||  // issuer == subject here
      !CBB_add_asn1(&tbs_cert, &validity, CBS_ASN1_SEQUENCE) ||
      !CBBAddTime(&validity, options.validity_start) ||
      !CBBAddTime(&validity, options.validity_end) ||
      !AddName(&tbs_cert, options.subject) ||  // subject
      !EVP_marshal_public_key(&tbs_cert, &key)) {  // subjectPublicKeyInfo
    return error;
  }
  CBB outer_extensions, extensions;
  if (!CBB_add_asn1(&tbs_cert, &outer_extensions,
                    3 | CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED) ||
      !CBB_add_asn1(&outer_extensions, &extensions, CBS_ASN1_SEQUENCE)) {
    return error;
  }
  // Critical keyUsage (OID 2.5.29.15) extension asserting digitalSignature.
  constexpr uint8_t kKeyUsageOid[] = {0x55, 0x1d, 0x0f};
  constexpr uint8_t kKeyUsageContent[] = {
      0x3,   // BIT STRING
      0x2,   // 2 bytes
      0x0,   // no unused bits
      0x80,  // bit(0): digitalSignature
  };
  // Bug fix: the result of CBBAddExtension() used to be ignored; a failure
  // here would have silently produced a malformed TBSCertificate.
  if (!CBBAddExtension(&extensions, kKeyUsageOid, true, kKeyUsageContent)) {
    return error;
  }
  if (!CBB_finish(cbb.get(), &tbs_cert_bytes, &tbs_cert_len)) {
    return error;
  }
  bssl::UniquePtr<uint8_t> delete_tbs_cert_bytes(tbs_cert_bytes);
  // Now wrap the TBSCertificate with the signature algorithm and the
  // ECDSA-SHA256 signature over the TBSCertificate bytes.
  CBB cert, signature;
  bssl::ScopedEVP_MD_CTX ctx;
  uint8_t* sig_out;
  size_t sig_len;
  uint8_t* cert_bytes;
  size_t cert_len;
  if (!CBB_init(cbb.get(), tbs_cert_len) ||
      !CBB_add_asn1(cbb.get(), &cert, CBS_ASN1_SEQUENCE) ||
      !CBB_add_bytes(&cert, tbs_cert_bytes, tbs_cert_len) ||
      !AddEcdsa256SignatureAlgorithm(&cert) ||
      !CBB_add_asn1(&cert, &signature, CBS_ASN1_BITSTRING) ||
      !CBB_add_u8(&signature, 0 /* no unused bits */) ||
      !EVP_DigestSignInit(ctx.get(), nullptr, EVP_sha256(), nullptr, &key) ||
      // First EVP_DigestSign call sizes the signature; the second writes it
      // into the space reserved in the CBB.
      !EVP_DigestSign(ctx.get(), nullptr, &sig_len, tbs_cert_bytes,
                      tbs_cert_len) ||
      !CBB_reserve(&signature, &sig_out, sig_len) ||
      !EVP_DigestSign(ctx.get(), sig_out, &sig_len, tbs_cert_bytes,
                      tbs_cert_len) ||
      !CBB_did_write(&signature, sig_len) ||
      !CBB_finish(cbb.get(), &cert_bytes, &cert_len)) {
    return error;
  }
  bssl::UniquePtr<uint8_t> delete_cert_bytes(cert_bytes);
  return std::string(reinterpret_cast<char*>(cert_bytes), cert_len);
}
} | #include "quiche/quic/core/crypto/certificate_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_output.h"
namespace quic {
namespace test {
namespace {
TEST(CertificateUtilTest, CreateSelfSignedCertificate) {
  // Generate a P-256 key and self-sign a certificate with it.
  bssl::UniquePtr<EVP_PKEY> key = MakeKeyPairForSelfSignedCertificate();
  ASSERT_NE(key, nullptr);
  CertificatePrivateKey cert_key(std::move(key));
  CertificateOptions options;
  options.subject = "CN=subject";
  options.serial_number = 0x12345678;
  options.validity_start = {2020, 1, 1, 0, 0, 0};
  options.validity_end = {2049, 12, 31, 0, 0, 0};
  std::string der_cert =
      CreateSelfSignedCertificate(*cert_key.private_key(), options);
  ASSERT_FALSE(der_cert.empty());
  QuicSaveTestOutput("CertificateUtilTest_CreateSelfSignedCert.crt", der_cert);
  // The produced DER must parse back and round-trip the key type, subject,
  // and public key.
  std::unique_ptr<CertificateView> cert_view =
      CertificateView::ParseSingleCertificate(der_cert);
  ASSERT_NE(cert_view, nullptr);
  EXPECT_EQ(cert_view->public_key_type(), PublicKeyType::kP256);
  std::optional<std::string> subject = cert_view->GetHumanReadableSubject();
  ASSERT_TRUE(subject.has_value());
  EXPECT_EQ(*subject, options.subject);
  EXPECT_TRUE(
      cert_key.ValidForSignatureAlgorithm(SSL_SIGN_ECDSA_SECP256R1_SHA256));
  EXPECT_TRUE(cert_key.MatchesPublicKey(*cert_view));
}
}
}
} |
3 | #ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
#define ABSL_BASE_INTERNAL_SYSINFO_H_
#ifndef _WIN32
#include <sys/types.h>
#endif
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/port.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Nominal CPU clock rate in Hz, computed once and cached; 1.0 when it
// cannot be determined.
double NominalCPUFrequency();
// Number of logical CPUs, computed once and cached.
int NumCPUs();
#ifdef _WIN32
using pid_t = uint32_t;
#endif
// OS-specific identifier for the calling thread.
pid_t GetTID();
// Like GetTID(), but cached in a thread_local where available.
pid_t GetCachedTID();
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/base/internal/sysinfo.h"
#include "absl/base/attributes.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#ifdef __linux__
#include <sys/syscall.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#ifdef __NetBSD__
#include <lwp.h>
#endif
#if defined(__myriad2__)
#include <rtems.h>
#endif
#include <string.h>
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <thread>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#if defined(_WIN32)
// Counts set bits in |bitMask| (Kernighan's trick: clearing the lowest set
// bit each iteration); used to count logical processors in an affinity mask.
DWORD Win32CountSetBits(ULONG_PTR bitMask) {
  for (DWORD bitSetCount = 0; ; ++bitSetCount) {
    if (bitMask == 0) return bitSetCount;
    bitMask &= bitMask - 1;
  }
}
// Returns the number of logical CPUs from GetLogicalProcessorInformation(),
// or 0 on allocation/API failure (caller substitutes a fallback).
int Win32NumCPUs() {
#pragma comment(lib, "kernel32.lib")
  using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
  DWORD info_size = sizeof(Info);
  Info* info(static_cast<Info*>(malloc(info_size)));
  if (info == nullptr) return 0;
  bool success = GetLogicalProcessorInformation(info, &info_size);
  if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
    // The first call only reported the required buffer size; retry with it.
    free(info);
    info = static_cast<Info*>(malloc(info_size));
    if (info == nullptr) return 0;
    success = GetLogicalProcessorInformation(info, &info_size);
  }
  DWORD logicalProcessorCount = 0;
  if (success) {
    Info* ptr = info;
    DWORD byteOffset = 0;
    while (byteOffset + sizeof(Info) <= info_size) {
      switch (ptr->Relationship) {
        case RelationProcessorCore:
          // Each core record's mask has one bit per logical processor.
          logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
          break;
        case RelationNumaNode:
        case RelationCache:
        case RelationProcessorPackage:
          break;
        default:
          break;
      }
      byteOffset += sizeof(Info);
      ptr++;
    }
  }
  free(info);
  return static_cast<int>(logicalProcessorCount);
}
#endif
}
// Platform-specific CPU count; invoked exactly once from NumCPUs().
static int GetNumCPUs() {
#if defined(__myriad2__)
  return 1;
#elif defined(_WIN32)
  // Win32NumCPUs() may return 0 on failure; never report a zero CPU count.
  const int hardware_concurrency = Win32NumCPUs();
  return hardware_concurrency ? hardware_concurrency : 1;
#elif defined(_AIX)
  return sysconf(_SC_NPROCESSORS_ONLN);
#else
  // NOTE(review): hardware_concurrency() may legally return 0 if the value
  // is not computable; presumably nonzero on all supported platforms.
  return static_cast<int>(std::thread::hardware_concurrency());
#endif
}
#if defined(_WIN32)
// Windows: read the nominal frequency from the registry's "~MHz" value for
// processor 0; returns 1.0 when unavailable (e.g. UWP app partition).
static double GetNominalCPUFrequency() {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
  // UWP apps cannot read the registry; fall back to a sentinel frequency.
  return 1.0;
#else
#pragma comment(lib, "advapi32.lib")
  HKEY key;
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
                    KEY_READ, &key) == ERROR_SUCCESS) {
    DWORD type = 0;
    DWORD data = 0;
    DWORD data_size = sizeof(data);
    auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
                                   reinterpret_cast<LPBYTE>(&data), &data_size);
    RegCloseKey(key);
    if (result == ERROR_SUCCESS && type == REG_DWORD &&
        data_size == sizeof(data)) {
      // Registry reports MHz; convert to Hz.
      return data * 1e6;
    }
  }
  return 1.0;
#endif
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
// BSD/macOS: read the CPU frequency in Hz via sysctl(HW_CPU_FREQ); returns
// 1.0 when the query fails.
static double GetNominalCPUFrequency() {
  unsigned freq;
  size_t size = sizeof(freq);
  int mib[2] = {CTL_HW, HW_CPU_FREQ};
  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
    return static_cast<double>(freq);
  }
  return 1.0;
}
#else
// Reads up to 1023 bytes from |file| and parses a leading base-10 long into
// |*value|. Returns true only when the file was read and the number is
// terminated by '\n' or NUL; |*value| is untouched on failure.
static bool ReadLongFromFile(const char *file, long *value) {
#if defined(_POSIX_C_SOURCE)
  const int open_flags = (O_RDONLY | O_CLOEXEC);
#else
  const int open_flags = O_RDONLY;
#endif
  const int fd = open(file, open_flags);
  if (fd == -1) {
    return false;
  }
  char buffer[1024];
  memset(buffer, '\0', sizeof(buffer));
  ssize_t bytes_read;
  do {
    // Retry reads interrupted by signals.
    bytes_read = read(fd, buffer, sizeof(buffer) - 1);
  } while (bytes_read < 0 && errno == EINTR);
  close(fd);
  if (bytes_read <= 0) {
    return false;
  }
  char *parse_end;
  const long parsed = strtol(buffer, &parse_end, 10);
  if (buffer[0] == '\0' || (*parse_end != '\n' && *parse_end != '\0')) {
    return false;
  }
  *value = parsed;
  return true;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
// Reads the monotonic clock (the RAW variant when available, so NTP slewing
// does not skew TSC calibration) in nanoseconds; fatal-logs on failure.
static int64_t ReadMonotonicClockNanos() {
  struct timespec t;
#ifdef CLOCK_MONOTONIC_RAW
  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
#else
  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
  if (rc != 0) {
    ABSL_INTERNAL_LOG(
        FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
  }
  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
// Thin wrapper so the unscaled cycle clock is only referenced from the
// frequency-measurement path.
class UnscaledCycleClockWrapperForInitializeFrequency {
 public:
  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
// A monotonic-clock reading paired with a TSC sample taken at (nearly) the
// same instant; produced by GetTimeTscPair().
struct TimeTscPair {
  int64_t time;
  int64_t tsc;
};
// Samples (monotonic time, TSC) ten times and keeps the sample whose two
// clock reads were closest together, i.e. the one where the TSC read is
// most tightly bracketed by the time reads.
static TimeTscPair GetTimeTscPair() {
  TimeTscPair best_sample;
  int64_t lowest_latency = std::numeric_limits<int64_t>::max();
  for (int attempt = 0; attempt < 10; ++attempt) {
    const int64_t before = ReadMonotonicClockNanos();
    const int64_t cycles = UnscaledCycleClockWrapperForInitializeFrequency::Now();
    const int64_t after = ReadMonotonicClockNanos();
    const int64_t latency = after - before;
    if (latency < lowest_latency) {
      lowest_latency = latency;
      best_sample.time = before;
      best_sample.tsc = cycles;
    }
  }
  return best_sample;
}
// Estimates the TSC rate (ticks/second) by sampling before and after a
// sleep of |sleep_nanoseconds|, resuming the sleep across EINTR.
static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
  const auto start = GetTimeTscPair();
  struct timespec remaining;
  remaining.tv_sec = 0;
  remaining.tv_nsec = sleep_nanoseconds;
  while (nanosleep(&remaining, &remaining) != 0 && errno == EINTR) {}
  const auto finish = GetTimeTscPair();
  const double elapsed_ticks = finish.tsc - start.tsc;
  const double elapsed_time = (finish.time - start.time) * 1e-9;
  return elapsed_ticks / elapsed_time;
}
// Measures the TSC frequency with geometrically increasing sleep intervals
// (1ms, 2ms, ...), returning as soon as two consecutive measurements agree
// to within 1%; otherwise returns the last (longest-interval) measurement.
static double MeasureTscFrequency() {
  double previous = -1.0;
  int sleep_ns = 1000000;
  for (int attempt = 0; attempt < 8; ++attempt, sleep_ns *= 2) {
    const double current = MeasureTscFrequencyWithSleep(sleep_ns);
    const bool converged =
        current * 0.99 < previous && previous < current * 1.01;
    if (converged) {
      return current;
    }
    previous = current;
  }
  return previous;
}
#endif
// Default path: try the (non-standard, present on some kernels) tsc_freq_khz
// sysfs node, then a direct TSC measurement when the cycle clock ticks at
// CPU frequency, then cpuinfo_max_freq; 1.0 as a last resort.
static double GetNominalCPUFrequency() {
  long freq = 0;
  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
    return freq * 1e3;  // value is in kHz
  }
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
  return MeasureTscFrequency();
#else
  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
                       &freq)) {
    return freq * 1e3;  // value is in kHz
  }
  return 1.0;
#endif
}
#endif
ABSL_CONST_INIT static once_flag init_num_cpus_once;
ABSL_CONST_INIT static int num_cpus = 0;
// Number of logical CPUs; computed on first call and cached for the
// process lifetime.
int NumCPUs() {
  base_internal::LowLevelCallOnce(
      &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
  return num_cpus;
}
ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
// Nominal CPU frequency in Hz; computed on first call and cached.
double NominalCPUFrequency() {
  base_internal::LowLevelCallOnce(
      &init_nominal_cpu_frequency_once,
      []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
  return nominal_cpu_frequency;
}
// GetTID(): per-platform thread identifier. Exactly one definition below is
// compiled, selected by the preprocessor chain.
#if defined(_WIN32)
pid_t GetTID() {
  return pid_t{GetCurrentThreadId()};
}
#elif defined(__linux__)
#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif
pid_t GetTID() {
  // glibc historically lacked a gettid() wrapper, so call it directly.
  return static_cast<pid_t>(syscall(SYS_gettid));
}
#elif defined(__akaros__)
pid_t GetTID() {
  // In vcore context a thread id is not meaningful; 0 is returned.
  if (in_vcore_context())
    return 0;
  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}
#elif defined(__myriad2__)
pid_t GetTID() {
  uint32_t tid;
  rtems_task_ident(RTEMS_SELF, 0, &tid);
  return tid;
}
#elif defined(__APPLE__)
pid_t GetTID() {
  uint64_t tid;
  // nullptr means "the calling thread".
  pthread_threadid_np(nullptr, &tid);
  return static_cast<pid_t>(tid);
}
#elif defined(__FreeBSD__)
pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); }
#elif defined(__OpenBSD__)
pid_t GetTID() { return getthrid(); }
#elif defined(__NetBSD__)
pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
#elif defined(__native_client__)
pid_t GetTID() {
  // NaCl exposes no numeric thread id; reuse the pthread_t handle value.
  auto* thread = pthread_self();
  static_assert(sizeof(pid_t) == sizeof(thread),
                "In NaCL int expected to be the same size as a pointer");
  return reinterpret_cast<pid_t>(thread);
}
#else
// Fallback: pthread_self() is not guaranteed to be an integer, but this is
// the best generic option.
pid_t GetTID() {
  return static_cast<pid_t>(pthread_self());
}
#endif
// Returns the calling thread's id, caching it in a thread_local so repeat
// callers avoid the syscall; falls back to GetTID() without thread_local.
pid_t GetCachedTID() {
#ifdef ABSL_HAVE_THREAD_LOCAL
  static thread_local pid_t thread_id = GetTID();
  return thread_id;
#else
  return GetTID();
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/sysinfo.h"
#ifndef _WIN32
#include <sys/types.h>
#include <unistd.h>
#endif
#include <thread>
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
TEST(SysinfoTest, NumCPUs) {
  // A zero result would mean the once-initialized cache was never computed.
  EXPECT_NE(NumCPUs(), 0)
      << "NumCPUs() should not have the default value of 0";
}
TEST(SysinfoTest, GetTID) {
  // Stable within a thread.
  EXPECT_EQ(GetTID(), GetTID());
#ifdef __native_client__
  // NaCl reuses pthread handles, so the uniqueness check below is invalid.
  return;
#endif
  // Distinct across concurrently live threads: each of 10 threads records
  // its id while the barrier keeps all of them alive simultaneously.
  for (int i = 0; i < 10; ++i) {
    constexpr int kNumThreads = 10;
    Barrier all_threads_done(kNumThreads);
    std::vector<std::thread> threads;
    Mutex mutex;
    std::unordered_set<pid_t> tids;
    for (int j = 0; j < kNumThreads; ++j) {
      threads.push_back(std::thread([&]() {
        pid_t id = GetTID();
        {
          MutexLock lock(&mutex);
          ASSERT_TRUE(tids.find(id) == tids.end());
          tids.insert(id);
        }
        // Hold every thread until all ids are recorded, so the OS cannot
        // recycle a finished thread's id within this iteration.
        all_threads_done.Block();
      }));
    }
    for (auto& thread : threads) {
      thread.join();
    }
  }
}
#ifdef __linux__
TEST(SysinfoTest, LinuxGetTID) {
  // On Linux the main thread's TID equals the process id.
  EXPECT_EQ(GetTID(), getpid());
}
#endif
}
}
ABSL_NAMESPACE_END
} |
4 | #ifndef QUICHE_COMMON_HTTP_HTTP_HEADER_BLOCK_H_
#define QUICHE_COMMON_HTTP_HTTP_HEADER_BLOCK_H_
#include <stddef.h>
#include <functional>
#include <list>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "quiche/common/http/http_header_storage.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_linked_hash_map.h"
#include "quiche/common/quiche_text_utils.h"
namespace quiche {
namespace test {
class HttpHeaderBlockPeer;
class ValueProxyPeer;
}
#ifndef SPDY_HEADER_DEBUG
#if !defined(NDEBUG) || defined(ADDRESS_SANITIZER)
#define SPDY_HEADER_DEBUG 1
#else
#define SPDY_HEADER_DEBUG 0
#endif
#endif
// A case-insensitive, insertion-ordered collection of HTTP header names and
// values. All strings are backed by an internal HttpHeaderStorage arena, so
// iteration and lookup hand out string_views owned by this block.
class QUICHE_EXPORT HttpHeaderBlock {
 private:
  // One header's value: stored as a list of fragments that are joined lazily
  // (with a key-dependent separator) the first time the value is read.
  class QUICHE_EXPORT HeaderValue {
   public:
    HeaderValue(HttpHeaderStorage* storage, absl::string_view key,
                absl::string_view initial_value);
    HeaderValue(HeaderValue&& other);
    HeaderValue& operator=(HeaderValue&& other);
    // Repoints this value at a different arena; used when the owning block
    // is moved.
    void set_storage(HttpHeaderStorage* storage);
    HeaderValue(const HeaderValue& other) = delete;
    HeaderValue& operator=(const HeaderValue& other) = delete;
    ~HeaderValue();
    // Records another fragment; consolidation is deferred until read.
    void Append(absl::string_view fragment);
    absl::string_view value() const { return as_pair().second; }
    const std::pair<absl::string_view, absl::string_view>& as_pair() const;
    // Estimated size in bytes including separators.
    size_t SizeEstimate() const { return size_; }
   private:
    absl::string_view ConsolidatedValue() const;
    // mutable: consolidation happens on const read paths.
    mutable HttpHeaderStorage* storage_;
    mutable Fragments fragments_;
    mutable std::pair<absl::string_view, absl::string_view> pair_;
    size_t size_ = 0;
    size_t separator_size_ = 0;
  };
  typedef quiche::QuicheLinkedHashMap<absl::string_view, HeaderValue,
                                      quiche::StringPieceCaseHash,
                                      quiche::StringPieceCaseEqual>
      MapType;
 public:
  typedef std::pair<absl::string_view, absl::string_view> value_type;
  // Whether insert() created a new entry or overwrote an existing one.
  enum class InsertResult {
    kInserted,
    kReplaced,
  };
  // Forward iterator over (name, value) pairs. With SPDY_HEADER_DEBUG,
  // dereferencing an end() iterator CHECK-fails instead of being UB.
  class QUICHE_EXPORT iterator {
   public:
    typedef std::pair<absl::string_view, absl::string_view> value_type;
    typedef value_type& reference;
    typedef value_type* pointer;
    typedef std::forward_iterator_tag iterator_category;
    typedef MapType::iterator::difference_type difference_type;
    typedef const value_type& const_reference;
    typedef const value_type* const_pointer;
    explicit iterator(MapType::const_iterator it);
    iterator(const iterator& other);
    ~iterator();
    const_reference operator*() const {
#if SPDY_HEADER_DEBUG
      QUICHE_CHECK(!dereference_forbidden_);
#endif
      return it_->second.as_pair();
    }
    const_pointer operator->() const { return &(this->operator*()); }
    bool operator==(const iterator& it) const { return it_ == it.it_; }
    bool operator!=(const iterator& it) const { return !(*this == it); }
    iterator& operator++() {
      it_++;
      return *this;
    }
    iterator operator++(int) {
      auto ret = *this;
      this->operator++();
      return ret;
    }
#if SPDY_HEADER_DEBUG
    // Marks this iterator non-dereferenceable (set on end() iterators).
    void forbid_dereference() { dereference_forbidden_ = true; }
#endif
   private:
    MapType::const_iterator it_;
#if SPDY_HEADER_DEBUG
    bool dereference_forbidden_ = false;
#endif
  };
  typedef iterator const_iterator;
  HttpHeaderBlock();
  HttpHeaderBlock(const HttpHeaderBlock& other) = delete;
  HttpHeaderBlock(HttpHeaderBlock&& other);
  ~HttpHeaderBlock();
  HttpHeaderBlock& operator=(const HttpHeaderBlock& other) = delete;
  HttpHeaderBlock& operator=(HttpHeaderBlock&& other);
  // Deep copy; the clone owns its own storage.
  HttpHeaderBlock Clone() const;
  bool operator==(const HttpHeaderBlock& other) const;
  bool operator!=(const HttpHeaderBlock& other) const;
  std::string DebugString() const;
  iterator begin() { return wrap_iterator(map_.begin()); }
  iterator end() { return wrap_iterator(map_.end()); }
  const_iterator begin() const { return wrap_const_iterator(map_.begin()); }
  const_iterator end() const { return wrap_const_iterator(map_.end()); }
  bool empty() const { return map_.empty(); }
  size_t size() const { return map_.size(); }
  iterator find(absl::string_view key) { return wrap_iterator(map_.find(key)); }
  const_iterator find(absl::string_view key) const {
    return wrap_const_iterator(map_.find(key));
  }
  bool contains(absl::string_view key) const { return find(key) != end(); }
  void erase(absl::string_view key);
  void clear();
  // Inserts (key, value), replacing any existing value for that key.
  InsertResult insert(const value_type& value);
  // Appends |value| to an existing header (separator chosen per key) or
  // adds a new header when the key is absent.
  void AppendValueOrAddHeader(const absl::string_view key,
                              const absl::string_view value);
  // Proxy returned by operator[]: lets callers assign to a possibly-absent
  // key; storage written for the key is rewound if no value is assigned.
  class QUICHE_EXPORT ValueProxy {
   public:
    ~ValueProxy();
    ValueProxy(ValueProxy&& other);
    ValueProxy& operator=(ValueProxy&& other);
    ValueProxy(const ValueProxy& other) = delete;
    ValueProxy& operator=(const ValueProxy& other) = delete;
    ValueProxy& operator=(absl::string_view value);
    bool operator==(absl::string_view value) const;
    std::string as_string() const;
   private:
    friend class HttpHeaderBlock;
    friend class test::ValueProxyPeer;
    ValueProxy(HttpHeaderBlock* block,
               HttpHeaderBlock::MapType::iterator lookup_result,
               const absl::string_view key,
               size_t* spdy_header_block_value_size);
    HttpHeaderBlock* block_;
    HttpHeaderBlock::MapType::iterator lookup_result_;
    absl::string_view key_;
    size_t* spdy_header_block_value_size_;
    // False once moved-from; suppresses the destructor's storage rewind.
    bool valid_;
  };
  ABSL_MUST_USE_RESULT ValueProxy operator[](const absl::string_view key);
  // Total bytes of all keys plus all (estimated) value sizes.
  size_t TotalBytesUsed() const { return key_size_ + value_size_; }
 private:
  friend class test::HttpHeaderBlockPeer;
  // Wraps a map iterator; in debug builds forbids dereferencing end().
  inline iterator wrap_iterator(MapType::const_iterator inner_iterator) const {
#if SPDY_HEADER_DEBUG
    iterator outer_iterator(inner_iterator);
    if (inner_iterator == map_.end()) {
      outer_iterator.forbid_dereference();
    }
    return outer_iterator;
#else
    return iterator(inner_iterator);
#endif
  }
  inline const_iterator wrap_const_iterator(
      MapType::const_iterator inner_iterator) const {
#if SPDY_HEADER_DEBUG
    const_iterator outer_iterator(inner_iterator);
    if (inner_iterator == map_.end()) {
      outer_iterator.forbid_dereference();
    }
    return outer_iterator;
#else
    return iterator(inner_iterator);
#endif
  }
  void AppendHeader(const absl::string_view key, const absl::string_view value);
  absl::string_view WriteKey(const absl::string_view key);
  size_t bytes_allocated() const;
  MapType map_;
  HttpHeaderStorage storage_;
  size_t key_size_ = 0;
  size_t value_size_ = 0;
};
// Symmetric comparison: lets string_view appear on the left-hand side by
// delegating to ValueProxy::operator==.
inline bool operator==(absl::string_view lhs,
                       const HttpHeaderBlock::ValueProxy& rhs) {
  return rhs == lhs;
}
}
#endif
#include "quiche/common/http/http_header_block.h"
#include <string.h>
#include <algorithm>
#include <ios>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
// Initial bucket count for the underlying linked hash map.
const size_t kInitialMapBuckets = 11;
const char kCookieKey[] = "cookie";
const char kNullSeparator = 0;
// Cookie crumbs are joined with "; "; every other repeated header is joined
// with a single NUL byte.
absl::string_view SeparatorForKey(absl::string_view key) {
  if (key != kCookieKey) {
    return absl::string_view(&kNullSeparator, 1);
  }
  static absl::string_view cookie_separator = "; ";
  return cookie_separator;
}
}  // namespace
// Starts a value with one fragment; the separator size is fixed by the key
// (see SeparatorForKey) and folded into later size estimates.
HttpHeaderBlock::HeaderValue::HeaderValue(HttpHeaderStorage* storage,
                                          absl::string_view key,
                                          absl::string_view initial_value)
    : storage_(storage),
      fragments_({initial_value}),
      pair_({key, {}}),
      size_(initial_value.size()),
      separator_size_(SeparatorForKey(key).size()) {}
HttpHeaderBlock::HeaderValue::HeaderValue(HeaderValue&& other)
    : storage_(other.storage_),
      fragments_(std::move(other.fragments_)),
      pair_(std::move(other.pair_)),
      size_(other.size_),
      separator_size_(other.separator_size_) {}
HttpHeaderBlock::HeaderValue& HttpHeaderBlock::HeaderValue::operator=(
    HeaderValue&& other) {
  storage_ = other.storage_;
  fragments_ = std::move(other.fragments_);
  pair_ = std::move(other.pair_);
  size_ = other.size_;
  separator_size_ = other.separator_size_;
  return *this;
}
// Called after the owning block is moved so fragments consolidate into the
// new block's arena.
void HttpHeaderBlock::HeaderValue::set_storage(HttpHeaderStorage* storage) {
  storage_ = storage;
}
HttpHeaderBlock::HeaderValue::~HeaderValue() = default;
// Joins all pending fragments into a single arena-backed string (using the
// key's separator) and caches it as the sole fragment; empty values yield
// an empty view.
absl::string_view HttpHeaderBlock::HeaderValue::ConsolidatedValue() const {
  if (fragments_.empty()) {
    return absl::string_view();
  }
  if (fragments_.size() > 1) {
    absl::string_view joined =
        storage_->WriteFragments(fragments_, SeparatorForKey(pair_.first));
    fragments_.clear();
    fragments_.push_back(joined);
  }
  return fragments_[0];
}
// Queues another fragment; the size estimate charges one separator per
// appended fragment up front.
void HttpHeaderBlock::HeaderValue::Append(absl::string_view fragment) {
  size_ += (fragment.size() + separator_size_);
  fragments_.push_back(fragment);
}
// Returns (key, value), consolidating fragments on demand.
const std::pair<absl::string_view, absl::string_view>&
HttpHeaderBlock::HeaderValue::as_pair() const {
  pair_.second = ConsolidatedValue();
  return pair_;
}
// Out-of-line iterator special members (declared in the header).
HttpHeaderBlock::iterator::iterator(MapType::const_iterator it) : it_(it) {}
HttpHeaderBlock::iterator::iterator(const iterator& other) = default;
HttpHeaderBlock::iterator::~iterator() = default;
// Proxy for |key|; |lookup_result| is map_.end() when the key is not yet in
// the block.
HttpHeaderBlock::ValueProxy::ValueProxy(
    HttpHeaderBlock* block, HttpHeaderBlock::MapType::iterator lookup_result,
    const absl::string_view key, size_t* spdy_header_block_value_size)
    : block_(block),
      lookup_result_(lookup_result),
      key_(key),
      spdy_header_block_value_size_(spdy_header_block_value_size),
      valid_(true) {}
// Moving transfers responsibility for the destructor's storage rewind.
HttpHeaderBlock::ValueProxy::ValueProxy(ValueProxy&& other)
    : block_(other.block_),
      lookup_result_(other.lookup_result_),
      key_(other.key_),
      spdy_header_block_value_size_(other.spdy_header_block_value_size_),
      valid_(true) {
  other.valid_ = false;
}
HttpHeaderBlock::ValueProxy& HttpHeaderBlock::ValueProxy::operator=(
    HttpHeaderBlock::ValueProxy&& other) {
  block_ = other.block_;
  lookup_result_ = other.lookup_result_;
  key_ = other.key_;
  valid_ = true;
  other.valid_ = false;
  spdy_header_block_value_size_ = other.spdy_header_block_value_size_;
  return *this;
}
// If this proxy was created for an absent key and no value was ever
// assigned, reclaim the key bytes operator[] wrote into the arena.
HttpHeaderBlock::ValueProxy::~ValueProxy() {
  if (valid_ && lookup_result_ == block_->map_.end()) {
    block_->storage_.Rewind(key_);
  }
}
// Writes |value| for the proxied key: inserts a new entry when the key was
// absent, otherwise replaces the existing value. Keeps the owning block's
// value-size accounting current.
HttpHeaderBlock::ValueProxy& HttpHeaderBlock::ValueProxy::operator=(
    absl::string_view value) {
  *spdy_header_block_value_size_ += value.size();
  HttpHeaderStorage* storage = &block_->storage_;
  if (lookup_result_ == block_->map_.end()) {
    QUICHE_DVLOG(1) << "Inserting: (" << key_ << ", " << value << ")";
    lookup_result_ =
        block_->map_
            .emplace(std::make_pair(
                key_, HeaderValue(storage, key_, storage->Write(value))))
            .first;
  } else {
    QUICHE_DVLOG(1) << "Updating key: " << key_ << " with value: " << value;
    // Remove the old value's contribution before replacing it.
    *spdy_header_block_value_size_ -= lookup_result_->second.SizeEstimate();
    lookup_result_->second = HeaderValue(storage, key_, storage->Write(value));
  }
  return *this;
}
// A proxy for an absent key compares unequal to every string.
bool HttpHeaderBlock::ValueProxy::operator==(absl::string_view value) const {
  return lookup_result_ != block_->map_.end() &&
         value == lookup_result_->second.value();
}
// Returns the header value as an owned string; empty when the key is absent.
std::string HttpHeaderBlock::ValueProxy::as_string() const {
  return lookup_result_ == block_->map_.end()
             ? std::string()
             : std::string(lookup_result_->second.value());
}
HttpHeaderBlock::HttpHeaderBlock() : map_(kInitialMapBuckets) {}
// Move: steal the map and arena, then repoint every HeaderValue at the new
// arena so deferred consolidation writes to the right storage.
HttpHeaderBlock::HttpHeaderBlock(HttpHeaderBlock&& other)
    : map_(kInitialMapBuckets) {
  map_.swap(other.map_);
  storage_ = std::move(other.storage_);
  for (auto& p : map_) {
    p.second.set_storage(&storage_);
  }
  key_size_ = other.key_size_;
  value_size_ = other.value_size_;
}
HttpHeaderBlock::~HttpHeaderBlock() = default;
HttpHeaderBlock& HttpHeaderBlock::operator=(HttpHeaderBlock&& other) {
  map_.swap(other.map_);
  storage_ = std::move(other.storage_);
  for (auto& p : map_) {
    p.second.set_storage(&storage_);
  }
  key_size_ = other.key_size_;
  value_size_ = other.value_size_;
  return *this;
}
HttpHeaderBlock HttpHeaderBlock::Clone() const {
HttpHeaderBlock copy;
for (const auto& p : *this) {
copy.AppendHeader(p.first, p.second);
}
return copy;
}
bool HttpHeaderBlock::operator==(const HttpHeaderBlock& other) const {
return size() == other.size() && std::equal(begin(), end(), other.begin());
}
// Negation of operator==.
bool HttpHeaderBlock::operator!=(const HttpHeaderBlock& other) const {
  return !(*this == other);
}
std::string HttpHeaderBlock::DebugString() const {
if (empty()) {
return "{}";
}
std::string output = "\n{\n";
for (auto it = begin(); it != end(); ++it) {
absl::StrAppend(&output, " ", it->first, " ", it->second, "\n");
}
absl::StrAppend(&output, "}\n");
return output;
}
// Removes the header named `key`, updating the running size estimates.
// Erasing a missing key is a no-op.
void HttpHeaderBlock::erase(absl::string_view key) {
  auto it = map_.find(key);
  if (it == map_.end()) {
    return;
  }
  QUICHE_DVLOG(1) << "Erasing header with name: " << key;
  key_size_ -= key.size();
  value_size_ -= it->second.SizeEstimate();
  map_.erase(it);
}
// Drops all headers, releases the arena storage, and zeroes the size
// accounting.
void HttpHeaderBlock::clear() {
  map_.clear();
  storage_.Clear();
  key_size_ = 0;
  value_size_ = 0;
}
// Inserts `value` as a new header, or overwrites an existing value wholesale
// (unlike AppendValueOrAddHeader, which concatenates).  Returns whether an
// insert or a replace happened.
HttpHeaderBlock::InsertResult HttpHeaderBlock::insert(
    const HttpHeaderBlock::value_type& value) {
  // Add the new value's size first; the replace branch subtracts the old one.
  value_size_ += value.second.size();
  auto iter = map_.find(value.first);
  if (iter == map_.end()) {
    QUICHE_DVLOG(1) << "Inserting: (" << value.first << ", " << value.second
                    << ")";
    AppendHeader(value.first, value.second);
    return InsertResult::kInserted;
  } else {
    QUICHE_DVLOG(1) << "Updating key: " << iter->first
                    << " with value: " << value.second;
    value_size_ -= iter->second.SizeEstimate();
    // Reuse the existing arena-backed key; only the new value bytes are
    // written to storage (the old value's bytes remain in the arena).
    iter->second =
        HeaderValue(&storage_, iter->first, storage_.Write(value.second));
    return InsertResult::kReplaced;
  }
}
// Returns a proxy that lazily materializes the header on assignment.  When
// the key is absent its bytes are copied into the arena immediately so the
// proxy can refer to them; the map entry itself is only created if/when the
// proxy is assigned to (see ValueProxy::operator=).
HttpHeaderBlock::ValueProxy HttpHeaderBlock::operator[](
    const absl::string_view key) {
  QUICHE_DVLOG(2) << "Operator[] saw key: " << key;
  absl::string_view out_key;
  auto iter = map_.find(key);
  if (iter == map_.end()) {
    // Pre-write the key into storage; the KeyMemoryReclaimedOnLookup test
    // shows unused proxies return these bytes, presumably via the proxy's
    // destructor (defined elsewhere) rewinding the storage.
    out_key = WriteKey(key);
    QUICHE_DVLOG(2) << "Key written as: " << std::hex
                    << static_cast<const void*>(key.data()) << ", " << std::dec
                    << key.size();
  } else {
    out_key = iter->first;
  }
  return ValueProxy(this, iter, out_key, &value_size_);
}
// Adds (key, value) as a new header, or appends `value` to the existing
// header's value using a key-dependent separator (SeparatorForKey, defined
// elsewhere; the tests in this file show "; " for cookies and '\0' for other
// keys).
void HttpHeaderBlock::AppendValueOrAddHeader(const absl::string_view key,
                                             const absl::string_view value) {
  value_size_ += value.size();
  auto iter = map_.find(key);
  if (iter == map_.end()) {
    QUICHE_DVLOG(1) << "Inserting: (" << key << ", " << value << ")";
    AppendHeader(key, value);
    return;
  }
  QUICHE_DVLOG(1) << "Updating key: " << iter->first
                  << "; appending value: " << value;
  // The separator's bytes count toward the value-size estimate too.
  value_size_ += SeparatorForKey(key).size();
  iter->second.Append(storage_.Write(value));
}
// Unconditionally adds a new (key, value) entry; both strings are copied
// into arena storage first so the map references stable bytes.
void HttpHeaderBlock::AppendHeader(const absl::string_view key,
                                   const absl::string_view value) {
  const absl::string_view backed_key = WriteKey(key);
  HeaderValue backed_value(&storage_, backed_key, storage_.Write(value));
  map_.emplace(std::make_pair(backed_key, std::move(backed_value)));
}
// Copies `key` into arena storage (so it outlives the caller's buffer) and
// adds its length to the running key-size total.
absl::string_view HttpHeaderBlock::WriteKey(const absl::string_view key) {
  key_size_ += key.size();
  return storage_.Write(key);
}
// Total bytes allocated by the underlying arena (capacity, not live bytes).
size_t HttpHeaderBlock::bytes_allocated() const {
  return storage_.bytes_allocated();
}
} | #include "quiche/common/http/http_header_block.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/spdy/test_tools/spdy_test_utils.h"
using ::testing::ElementsAre;
namespace quiche {
namespace test {
// Test peer exposing ValueProxy's private key_ member for inspection.
class ValueProxyPeer {
 public:
  static absl::string_view key(HttpHeaderBlock::ValueProxy* p) {
    return p->key_;
  }
};
// Convenience helper for building expected (key, value) pairs in assertions.
std::pair<absl::string_view, absl::string_view> Pair(absl::string_view k,
                                                     absl::string_view v) {
  return std::make_pair(k, v);
}
// An empty block reports empty/size 0 and tolerates erasing a missing key.
TEST(HttpHeaderBlockTest, EmptyBlock) {
  HttpHeaderBlock block;
  EXPECT_TRUE(block.empty());
  EXPECT_EQ(0u, block.size());
  EXPECT_EQ(block.end(), block.find("foo"));
  EXPECT_FALSE(block.contains("foo"));
  EXPECT_TRUE(block.end() == block.begin());
  block.erase("bar");
}
// operator[] pre-writes the key into the arena; if the proxy is never
// assigned, those bytes are returned, so successive unused lookups reuse the
// same arena slot (same data() pointer).
TEST(HttpHeaderBlockTest, KeyMemoryReclaimedOnLookup) {
  HttpHeaderBlock block;
  absl::string_view copied_key1;
  {
    auto proxy1 = block["some key name"];
    copied_key1 = ValueProxyPeer::key(&proxy1);
  }
  absl::string_view copied_key2;
  {
    auto proxy2 = block["some other key name"];
    copied_key2 = ValueProxyPeer::key(&proxy2);
  }
  // Both unused proxies were backed by the same reclaimed arena bytes.
  EXPECT_EQ(copied_key1.data(), copied_key2.data());
  {
    auto proxy1 = block["some key name"];
    block["some other key name"] = "some value";
  }
  block["key"] = "value";
  EXPECT_EQ("value", block["key"]);
  EXPECT_EQ("some value", block["some other key name"]);
  // The never-assigned proxy left no map entry behind.
  EXPECT_TRUE(block.find("some key name") == block.end());
}
// Basic add/overwrite/lookup/erase behavior via operator[] and insert().
TEST(HttpHeaderBlockTest, AddHeaders) {
  HttpHeaderBlock block;
  block["foo"] = std::string(300, 'x');
  block["bar"] = "baz";
  block["qux"] = "qux1";
  block["qux"] = "qux2";
  block.insert(std::make_pair("key", "value"));
  EXPECT_EQ(Pair("foo", std::string(300, 'x')), *block.find("foo"));
  EXPECT_EQ("baz", block["bar"]);
  std::string qux("qux");
  EXPECT_EQ("qux2", block[qux]);
  ASSERT_NE(block.end(), block.find("key"));
  ASSERT_TRUE(block.contains("key"));
  EXPECT_EQ(Pair("key", "value"), *block.find("key"));
  block.erase("key");
  EXPECT_EQ(block.end(), block.find("key"));
}
// Clone() produces blocks that compare equal to the original.
TEST(HttpHeaderBlockTest, CopyBlocks) {
  HttpHeaderBlock block1;
  block1["foo"] = std::string(300, 'x');
  block1["bar"] = "baz";
  block1.insert(std::make_pair("qux", "qux1"));
  HttpHeaderBlock block2 = block1.Clone();
  HttpHeaderBlock block3(block1.Clone());
  EXPECT_EQ(block1, block2);
  EXPECT_EQ(block1, block3);
}
// Equality is element-wise; differing size or contents makes blocks unequal.
TEST(HttpHeaderBlockTest, Equality) {
  HttpHeaderBlock block1;
  block1["foo"] = "bar";
  HttpHeaderBlock block2;
  block2["foo"] = "bar";
  HttpHeaderBlock block3;
  block3["baz"] = "qux";
  EXPECT_EQ(block1, block2);
  EXPECT_NE(block1, block3);
  block2["baz"] = "qux";
  EXPECT_NE(block1, block2);
}
// Helper returning a block by value, exercising the move path.
HttpHeaderBlock ReturnTestHeaderBlock() {
  HttpHeaderBlock block;
  block["foo"] = "bar";
  block.insert(std::make_pair("foo2", "baz"));
  return block;
}
// A moved-from block stays usable: it can be reassigned, cleared, and moved
// from again.
TEST(HttpHeaderBlockTest, MovedFromIsValid) {
  HttpHeaderBlock block1;
  block1["foo"] = "bar";
  HttpHeaderBlock block2(std::move(block1));
  EXPECT_THAT(block2, ElementsAre(Pair("foo", "bar")));
  block1["baz"] = "qux";
  HttpHeaderBlock block3(std::move(block1));
  block1["foo"] = "bar";
  HttpHeaderBlock block4(std::move(block1));
  block1.clear();
  EXPECT_TRUE(block1.empty());
  block1["foo"] = "bar";
  EXPECT_THAT(block1, ElementsAre(Pair("foo", "bar")));
  HttpHeaderBlock block5 = ReturnTestHeaderBlock();
  block5.AppendValueOrAddHeader("foo", "bar2");
  EXPECT_THAT(block5, ElementsAre(Pair("foo", std::string("bar\0bar2", 8)),
                                  Pair("foo2", "baz")));
}
// AppendValueOrAddHeader joins values with '\0' for ordinary keys and "; "
// for cookies, while insert() overwrites wholesale.
TEST(HttpHeaderBlockTest, AppendHeaders) {
  HttpHeaderBlock block;
  block["foo"] = "foo";
  block.AppendValueOrAddHeader("foo", "bar");
  EXPECT_EQ(Pair("foo", std::string("foo\0bar", 7)), *block.find("foo"));
  block.insert(std::make_pair("foo", "baz"));
  EXPECT_EQ("baz", block["foo"]);
  EXPECT_EQ(Pair("foo", "baz"), *block.find("foo"));
  block["cookie"] = "key1=value1";
  block.AppendValueOrAddHeader("h1", "h1v1");
  block.insert(std::make_pair("h2", "h2v1"));
  block.AppendValueOrAddHeader("h3", "h3v2");
  block.AppendValueOrAddHeader("h2", "h2v2");
  block.AppendValueOrAddHeader("h1", "h1v2");
  block.AppendValueOrAddHeader("cookie", "key2=value2");
  block.AppendValueOrAddHeader("cookie", "key3=value3");
  block.AppendValueOrAddHeader("h1", "h1v3");
  block.AppendValueOrAddHeader("h2", "h2v3");
  block.AppendValueOrAddHeader("h3", "h3v3");
  block.AppendValueOrAddHeader("h4", "singleton");
  block.AppendValueOrAddHeader("set-cookie", "yummy");
  block.AppendValueOrAddHeader("set-cookie", "scrumptious");
  EXPECT_EQ("key1=value1; key2=value2; key3=value3", block["cookie"]);
  EXPECT_EQ("baz", block["foo"]);
  EXPECT_EQ(std::string("h1v1\0h1v2\0h1v3", 14), block["h1"]);
  EXPECT_EQ(std::string("h2v1\0h2v2\0h2v3", 14), block["h2"]);
  EXPECT_EQ(std::string("h3v2\0h3v3", 9), block["h3"]);
  EXPECT_EQ("singleton", block["h4"]);
  EXPECT_EQ(std::string("yummy\0scrumptious", 17), block["set-cookie"]);
}
// ValueProxy::operator== compares against string_views, including embedded
// NULs; a proxy for a missing key is unequal to everything.
TEST(HttpHeaderBlockTest, CompareValueToStringPiece) {
  HttpHeaderBlock block;
  block["foo"] = "foo";
  block.AppendValueOrAddHeader("foo", "bar");
  const auto& val = block["foo"];
  const char expected[] = "foo\0bar";
  EXPECT_TRUE(absl::string_view(expected, 7) == val);
  EXPECT_TRUE(val == absl::string_view(expected, 7));
  EXPECT_FALSE(absl::string_view(expected, 3) == val);
  EXPECT_FALSE(val == absl::string_view(expected, 3));
  const char not_expected[] = "foo\0barextra";
  EXPECT_FALSE(absl::string_view(not_expected, 12) == val);
  EXPECT_FALSE(val == absl::string_view(not_expected, 12));
  const auto& val2 = block["foo2"];
  EXPECT_FALSE(absl::string_view(expected, 7) == val2);
  EXPECT_FALSE(val2 == absl::string_view(expected, 7));
  EXPECT_FALSE(absl::string_view("") == val2);
  EXPECT_FALSE(val2 == absl::string_view(""));
}
// Lookups are case-insensitive but the first-seen key spelling is preserved.
TEST(HttpHeaderBlockTest, UpperCaseNames) {
  HttpHeaderBlock block;
  block["Foo"] = "foo";
  block.AppendValueOrAddHeader("Foo", "bar");
  EXPECT_NE(block.end(), block.find("foo"));
  EXPECT_EQ(Pair("Foo", std::string("foo\0bar", 7)), *block.find("Foo"));
  block.AppendValueOrAddHeader("foo", "baz");
  EXPECT_THAT(block,
              ElementsAre(Pair("Foo", std::string("foo\0bar\0baz", 11))));
}
namespace {
// Sums key and value lengths over all headers, for comparison against
// HttpHeaderBlock::TotalBytesUsed().
size_t HttpHeaderBlockSize(const HttpHeaderBlock& block) {
  size_t size = 0;
  for (const auto& pair : block) {
    size += pair.first.size() + pair.second.size();
  }
  return size;
}
}
// TotalBytesUsed() tracks the element-wise byte total through inserts,
// overwrites, appends, moves, and erases.
TEST(HttpHeaderBlockTest, TotalBytesUsed) {
  HttpHeaderBlock block;
  const size_t value_size = 300;
  block["foo"] = std::string(value_size, 'x');
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  block.insert(std::make_pair("key", std::string(value_size, 'x')));
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  block.AppendValueOrAddHeader("abc", std::string(value_size, 'x'));
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  block["foo"] = std::string(value_size, 'x');
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  block.insert(std::make_pair("key", std::string(value_size, 'x')));
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  block.AppendValueOrAddHeader("abc", std::string(value_size, 'x'));
  EXPECT_EQ(block.TotalBytesUsed(), HttpHeaderBlockSize(block));
  size_t block_size = block.TotalBytesUsed();
  HttpHeaderBlock block_copy = std::move(block);
  EXPECT_EQ(block_size, block_copy.TotalBytesUsed());
  block_copy.erase("foo");
  EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
  block_copy.erase("key");
  EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
  block_copy.erase("abc");
  EXPECT_EQ(block_copy.TotalBytesUsed(), HttpHeaderBlockSize(block_copy));
}
// Iteration yields headers in insertion order (the map is a linked hash map).
TEST(HttpHeaderBlockTest, OrderPreserved) {
  HttpHeaderBlock block;
  block[":method"] = "GET";
  block["foo"] = "bar";
  block[":path"] = "/";
  EXPECT_THAT(block, ElementsAre(Pair(":method", "GET"), Pair("foo", "bar"),
                                 Pair(":path", "/")));
}
// insert() reports whether the key was newly inserted or replaced.
TEST(HttpHeaderBlockTest, InsertReturnValue) {
  HttpHeaderBlock block;
  EXPECT_EQ(HttpHeaderBlock::InsertResult::kInserted,
            block.insert({"foo", "bar"}));
  EXPECT_EQ(HttpHeaderBlock::InsertResult::kReplaced,
            block.insert({"foo", "baz"}));
}
}
} |
5 | #ifndef XLA_WINDOW_UTIL_H_
#define XLA_WINDOW_UTIL_H_
#include "absl/types/span.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace window_util {
// Builds a Window with the given dimension sizes; stride and both dilations
// default to 1.
Window MakeWindow(absl::Span<const int64_t> sizes);
// As above, but with an explicit per-dimension stride.
Window MakeWindow(absl::Span<const int64_t> sizes,
                  absl::Span<const int64_t> strides);
// Builds a PaddingConfig whose low and high edge padding both equal `sizes`.
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes);
// Human-readable rendering; only non-default fields are printed.
std::string ToString(const WindowDimension& dim);
std::string ToString(const Window& window);
// Predicates over the dimensions of a window.
bool HasStride(const Window& window);
bool HasPadding(const Window& window);
bool HasSymmetricPadding(const Window& window);
bool HasNegativePadding(const Window& window);
bool HasSymmetricPadding(const PaddingConfig& padding_config);
bool HasBaseDilation(const Window& window);
bool HasWindowDilation(const Window& window);
bool HasDilation(const Window& window);
bool HasOverlappingWindow(const Window& window);
bool HasWindowReversal(const Window& window);
bool AllOrNoneReversed(const Window& window);
// True if the dimension is a no-op: size 1, stride 1, no padding, no dilation.
bool IsTrivialWindowDimension(const WindowDimension& window_dimension);
// Length of a dimension of size `bound` after dilation by `dilation`.
int64_t DilatedBound(int64_t bound, int64_t dilation);
// Number of valid positions of a `window_size` window over `bound` elements
// advancing by `stride`.
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride);
}
}
#endif
#include "xla/window_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace window_util {
// Builds a Window with one dimension per entry of `sizes`; stride and both
// dilations default to 1 (padding fields keep their proto defaults).
Window MakeWindow(absl::Span<const int64_t> sizes) {
  Window window;
  for (const int64_t size : sizes) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_size(size);
    dim->set_stride(1);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
  }
  return window;
}
// Builds a Window with the given per-dimension sizes and strides; both
// dilations default to 1.  `sizes` and `strides` must have equal length.
Window MakeWindow(absl::Span<const int64_t> sizes,
                  absl::Span<const int64_t> strides) {
  Window window;
  CHECK_EQ(sizes.size(), strides.size());
  // Use size_t for the index: the original `auto nb = 0` deduced int, mixing
  // signed/unsigned in the comparison with span::size().
  for (size_t i = 0; i < sizes.size(); ++i) {
    auto* dimension = window.add_dimensions();
    dimension->set_size(sizes[i]);
    dimension->set_stride(strides[i]);
    dimension->set_base_dilation(1);
    dimension->set_window_dilation(1);
  }
  return window;
}
// Builds a PaddingConfig in which each dimension's low and high edge padding
// both equal the corresponding entry of `sizes`.
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes) {
  PaddingConfig config;
  for (const int64_t padding : sizes) {
    auto* dim = config.add_dimensions();
    dim->set_edge_padding_low(padding);
    dim->set_edge_padding_high(padding);
  }
  return config;
}
// Renders a single dimension as "(size=N,...)", printing only the fields
// that differ from their defaults (stride/dilations 1, padding 0).
std::string ToString(const WindowDimension& dim) {
  using absl::StrAppend;
  using absl::StrCat;
  std::string str = StrCat("(size=", dim.size());
  if (dim.stride() != 1) {
    StrAppend(&str, ",stride=", dim.stride());
  }
  if (dim.padding_low() != 0) {
    StrAppend(&str, ",padding_low=", dim.padding_low());
  }
  if (dim.padding_high() != 0) {
    StrAppend(&str, ",padding_high=", dim.padding_high());
  }
  if (dim.base_dilation() != 1) {
    StrAppend(&str, ",base_dilation=", dim.base_dilation());
  }
  if (dim.window_dilation() != 1) {
    StrAppend(&str, ",window_dilation=", dim.window_dilation());
  }
  if (dim.window_reversal()) {
    StrAppend(&str, ",window_reversal");
  }
  StrAppend(&str, ")");
  return str;
}
// Renders a window as space-separated "field=v0xv1x..." groups (one value per
// dimension, joined with 'x'); a field group is emitted only if some
// dimension has a non-default value for it.
std::string ToString(const Window& window) {
  using absl::StrAppend;
  using absl::StrCat;
  std::string str;
  // Appends "heading=" followed by the per-dimension values produced by
  // `format`, joined with 'x'.
  const auto add_field =
      [&](const char* heading,
          absl::FunctionRef<std::string(const WindowDimension&)> format) {
        StrAppend(&str, heading, "=");
        const char* prefix = "";
        for (const auto& window_dimension : window.dimensions()) {
          StrAppend(&str, prefix, format(window_dimension));
          prefix = "x";
        }
      };
  if (window.dimensions_size() > 0) {
    add_field("size",
              [](const WindowDimension& dim) { return StrCat(dim.size()); });
  }
  if (HasStride(window)) {
    add_field(" stride",
              [](const WindowDimension& dim) { return StrCat(dim.stride()); });
  }
  if (HasPadding(window)) {
    add_field(" pad", [](const WindowDimension& dim) {
      return StrCat(dim.padding_low(), "_", dim.padding_high());
    });
  }
  if (HasBaseDilation(window)) {
    add_field(" lhs_dilate", [](const WindowDimension& dim) {
      return StrCat(dim.base_dilation());
    });
  }
  if (HasWindowDilation(window)) {
    add_field(" rhs_dilate", [](const WindowDimension& dim) {
      return StrCat(dim.window_dilation());
    });
  }
  if (HasWindowReversal(window)) {
    add_field(" rhs_reversal", [](const WindowDimension& dim) {
      return StrCat(dim.window_reversal() ? 1 : 0);
    });
  }
  return str;
}
bool HasStride(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.stride() != 1) {
return true;
}
}
return false;
}
bool HasPadding(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.padding_low() != 0 || dim.padding_high() != 0) {
return true;
}
}
return false;
}
// True if every dimension's low padding equals its high padding.
bool HasSymmetricPadding(const Window& window) {
  return !absl::c_any_of(window.dimensions(), [](const WindowDimension& dim) {
    return dim.padding_low() != dim.padding_high();
  });
}
// True if every padding dimension's low edge padding equals its high edge
// padding.  Vacuously true for an empty config.
bool HasSymmetricPadding(const PaddingConfig& padding_config) {
  return absl::c_all_of(padding_config.dimensions(),
                        [](const PaddingConfig::PaddingConfigDimension& dim) {
                          return dim.edge_padding_low() ==
                                 dim.edge_padding_high();
                        });
}
// True if any dimension has negative low or high padding.
bool HasNegativePadding(const Window& window) {
  return absl::c_any_of(window.dimensions(), [](const WindowDimension& dim) {
    return dim.padding_low() < 0 || dim.padding_high() < 0;
  });
}
bool HasBaseDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.base_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowReversal(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_reversal()) {
return true;
}
}
return false;
}
// True if every dimension agrees on the window-reversal flag (all set or all
// clear).  Vacuously true for an empty window.
bool AllOrNoneReversed(const Window& window) {
  if (window.dimensions().empty()) {
    return true;
  }
  // Compare every dimension against the first one's flag.
  bool reversed = window.dimensions()[0].window_reversal();
  return absl::c_all_of(window.dimensions(), [&](const WindowDimension& dim) {
    return dim.window_reversal() == reversed;
  });
}
// True if any dimension has base (lhs) or window (rhs) dilation.
bool HasDilation(const Window& window) {
  return HasBaseDilation(window) || HasWindowDilation(window);
}
// True if the dimension is a no-op: size 1, stride 1, no padding, and no
// dilation (window reversal is not considered here).
bool IsTrivialWindowDimension(const WindowDimension& window_dimension) {
  return window_dimension.size() == 1 && window_dimension.stride() == 1 &&
         window_dimension.padding_low() == 0 &&
         window_dimension.padding_high() == 0 &&
         window_dimension.window_dilation() == 1 &&
         window_dimension.base_dilation() == 1;
}
bool HasOverlappingWindow(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
return false;
}
// Returns the length of a dimension of size `bound` after dilating by
// `dilation`: (bound - 1) * dilation + 1, with 0 mapping to 0.
// Requires bound >= 0 and dilation >= 1.
int64_t DilatedBound(int64_t bound, int64_t dilation) {
  CHECK_GE(bound, 0);
  CHECK_GE(dilation, 1);
  if (bound == 0) {
    return 0;
  }
  // Stretch each of the `bound - 1` gaps to `dilation`, then count the final
  // element itself.
  return (bound - 1) * dilation + 1;
}
// Returns how many positions a window of `window_size` can take over `bound`
// elements when advancing by `stride`: (bound - window_size) / stride + 1,
// or 0 if the window does not fit at all.
// Requires bound >= 0, window_size >= 0, stride >= 1.
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride) {
  CHECK_GE(window_size, 0);
  CHECK_GE(bound, 0);
  CHECK_GE(stride, 1);
  if (bound == 0 || window_size > bound) {
    return 0;
  }
  return (bound - window_size) / stride + 1;
}
}
} | #include "xla/window_util.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
// A window overlaps itself only when some dimension's size exceeds its
// stride (MakeWindow defaults stride to 1).
TEST(WindowUtilTest, HasOverlappingWindowTest) {
  EXPECT_FALSE(
      window_util::HasOverlappingWindow(window_util::MakeWindow({1, 1})));
  EXPECT_TRUE(
      window_util::HasOverlappingWindow(window_util::MakeWindow({2, 2, 2, 2})));
}
// The two-span MakeWindow overload stores sizes and strides positionally.
TEST(WindowUtilTest, MakeWindowStrideTest) {
  Window w = window_util::MakeWindow({1, 2}, {3, 4});
  EXPECT_EQ(w.dimensions()[0].size(), 1);
  EXPECT_EQ(w.dimensions()[1].size(), 2);
  EXPECT_EQ(w.dimensions()[0].stride(), 3);
  EXPECT_EQ(w.dimensions()[1].stride(), 4);
}
}
} |
6 | #ifndef QUICHE_COMMON_HTTP_HTTP_HEADER_STORAGE_H_
#define QUICHE_COMMON_HTTP_HTTP_HEADER_STORAGE_H_
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/quiche_simple_arena.h"
namespace quiche {
using Fragments = absl::InlinedVector<absl::string_view, 1>;
// Arena-backed storage for header keys and values.  string_views returned by
// Write()/WriteFragments() remain valid until Clear() or destruction.
// Copying is disallowed because handed-out views alias the arena.
class QUICHE_EXPORT HttpHeaderStorage {
 public:
  HttpHeaderStorage();
  HttpHeaderStorage(const HttpHeaderStorage&) = delete;
  HttpHeaderStorage& operator=(const HttpHeaderStorage&) = delete;
  HttpHeaderStorage(HttpHeaderStorage&& other) = default;
  HttpHeaderStorage& operator=(HttpHeaderStorage&& other) = default;
  // Copies `s` into the arena and returns a view of the stored bytes.
  absl::string_view Write(absl::string_view s);
  // Returns the bytes of `s` to the arena.  NOTE(review): presumably only
  // effective when `s` was the most recent allocation — see
  // QuicheSimpleArena::Free.
  void Rewind(absl::string_view s);
  // Releases all stored strings, invalidating previously returned views.
  void Clear() { arena_.Reset(); }
  // Writes all fragments joined by `separator` into a single arena string.
  absl::string_view WriteFragments(const Fragments& fragments,
                                   absl::string_view separator);
  size_t bytes_allocated() const { return arena_.status().bytes_allocated(); }
 private:
  QuicheSimpleArena arena_;
};
QUICHE_EXPORT size_t Join(char* dst, const Fragments& fragments,
absl::string_view separator);
}
#endif
#include "quiche/common/http/http_header_storage.h"
#include <cstring>
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
const size_t kDefaultStorageBlockSize = 2048;
}
HttpHeaderStorage::HttpHeaderStorage() : arena_(kDefaultStorageBlockSize) {}
// Copies `s` into the arena and returns a view over the arena-owned bytes.
absl::string_view HttpHeaderStorage::Write(const absl::string_view s) {
  const char* copy = arena_.Memdup(s.data(), s.size());
  return absl::string_view(copy, s.size());
}
// Returns the bytes of `s` to the arena.  NOTE(review): QuicheSimpleArena::
// Free is presumably only effective when `s` is the most recently allocated
// region — confirm before relying on this for arbitrary strings.
void HttpHeaderStorage::Rewind(const absl::string_view s) {
  arena_.Free(const_cast<char*>(s.data()), s.size());
}
// Concatenates `fragments` joined by `separator` into one freshly allocated
// arena string and returns a view of it.  Empty input yields an empty view.
absl::string_view HttpHeaderStorage::WriteFragments(
    const Fragments& fragments, absl::string_view separator) {
  if (fragments.empty()) {
    return absl::string_view();
  }
  // Size the allocation exactly: all fragment bytes plus one separator
  // between each adjacent pair.
  size_t total_size = 0;
  for (const absl::string_view& fragment : fragments) {
    total_size += fragment.size();
  }
  total_size += separator.size() * (fragments.size() - 1);
  char* dst = arena_.Alloc(total_size);
  const size_t written = Join(dst, fragments, separator);
  QUICHE_DCHECK_EQ(written, total_size);
  return absl::string_view(dst, total_size);
}
size_t Join(char* dst, const Fragments& fragments,
absl::string_view separator) {
if (fragments.empty()) {
return 0;
}
auto* original_dst = dst;
auto it = fragments.begin();
memcpy(dst, it->data(), it->size());
dst += it->size();
for (++it; it != fragments.end(); ++it) {
memcpy(dst, separator.data(), separator.size());
dst += separator.size();
memcpy(dst, it->data(), it->size());
dst += it->size();
}
return dst - original_dst;
}
} | #include "quiche/common/http/http_header_storage.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
// Joining no fragments writes nothing.
TEST(JoinTest, JoinEmpty) {
  Fragments empty;
  absl::string_view separator = ", ";
  char buf[10] = "";
  size_t written = Join(buf, empty, separator);
  EXPECT_EQ(0u, written);
}
// A single fragment is copied without any separator.
TEST(JoinTest, JoinOne) {
  Fragments v = {"one"};
  absl::string_view separator = ", ";
  char buf[15];
  size_t written = Join(buf, v, separator);
  EXPECT_EQ(3u, written);
  EXPECT_EQ("one", absl::string_view(buf, written));
}
// Multiple fragments are joined with exactly one separator between each pair.
TEST(JoinTest, JoinMultiple) {
  Fragments v = {"one", "two", "three"};
  absl::string_view separator = ", ";
  char buf[15];
  size_t written = Join(buf, v, separator);
  EXPECT_EQ(15u, written);
  EXPECT_EQ("one, two, three", absl::string_view(buf, written));
}
}
} |
7 | #ifndef TENSORFLOW_CORE_IR_INTERFACES_H_
#define TENSORFLOW_CORE_IR_INTERFACES_H_
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h.inc"
namespace mlir {
namespace tfg {
// Dialect-level fallback model for TensorFlowRegistryInterface.  A dialect
// registers a concrete subclass that answers isStateful() for its ops.
class TensorFlowRegistryInterfaceBase
    : public TensorFlowRegistryInterface::FallbackModel<
          TensorFlowRegistryInterfaceBase>,
      public DialectInterface::Base<TensorFlowRegistryInterfaceBase> {
 public:
  explicit TensorFlowRegistryInterfaceBase(Dialect *dialect)
      : DialectInterface::Base<TensorFlowRegistryInterfaceBase>(dialect) {}
  // Returns whether `op` is considered stateful.
  virtual bool isStateful(Operation *op) const = 0;
};
// Fallback MemoryEffectOpInterface model that derives memory effects from an
// op's statefulness (see getEffects in the implementation below).
class StatefulMemoryEffectInterface
    : public MemoryEffectOpInterface::FallbackModel<
          StatefulMemoryEffectInterface>,
      public DialectInterface::Base<StatefulMemoryEffectInterface> {
 public:
  explicit StatefulMemoryEffectInterface(Dialect *dialect)
      : DialectInterface::Base<StatefulMemoryEffectInterface>(dialect) {}
  // Appends the memory effects of `op` to `effects`.
  void getEffects(
      Operation *op,
      SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
          &effects) const;
};
}
namespace OpTrait {
// Empty marker trait; carries no verification or behavior of its own.
template <typename ConcreteType>
class IntrinsicOperation
    : public mlir::OpTrait::TraitBase<ConcreteType, IntrinsicOperation> {};
}
}
#endif
#include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
// Verifies that `region` carries exactly one control-token block argument per
// data block argument (TFG pairs each data value with a control token).
LogicalResult ControlArgumentInterface::verifyRegion(Operation *op,
                                                     Region &region) {
  unsigned num_ctl = 0, num_data = 0;
  for (BlockArgument arg : region.getArguments()) {
    bool is_ctl = mlir::isa<tf_type::ControlType>(arg.getType());
    num_ctl += is_ctl;
    num_data += !is_ctl;
  }
  if (num_ctl != num_data) {
    return op->emitOpError("region #")
           << region.getRegionNumber()
           << " expected same number of data values and control tokens ("
           << num_data << " vs. " << num_ctl << ")";
  }
  return success();
}
// Reports a conservative memory-write effect for ops that are stateful, have
// no registry interface (unknown ops), or live inside a GraphOp region —
// which prevents such ops from being treated as side-effect free.
void StatefulMemoryEffectInterface::getEffects(
    Operation *op,
    SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) const {
  auto registry = dyn_cast<TensorFlowRegistryInterface>(op);
  if (!registry || registry.isStateful() || op->getParentOfType<GraphOp>()) {
    effects.emplace_back(MemoryEffects::Write::get());
  }
}
}
}
#include "tensorflow/core/ir/interfaces.cc.inc" | #include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
// Without a dialect-registered implementation, casting an op to
// TensorFlowRegistryInterface fails.
TEST(TensorFlowRegistryInterface, TestDefaultImplementation) {
  MLIRContext context(MLIRContext::Threading::DISABLED);
  auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
  OperationState state(UnknownLoc::get(&context), "tfg.Foo");
  state.addTypes(dialect->getControlType());
  Operation *op = Operation::create(state);
  auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
  ASSERT_TRUE(succeeded(verify(op)));
  auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
  EXPECT_FALSE(iface);
}
// A dialect-registered TensorFlowRegistryInterfaceBase subclass makes the
// interface castable on every op and drives per-op isStateful() answers.
TEST(TensorFlowRegisterInterface, TestCustomImplementation) {
  MLIRContext context(MLIRContext::Threading::DISABLED);
  DialectRegistry registry;
  registry.insert<TFGraphDialect>();
  // Stateful iff the op's name (minus dialect prefix) is "Foo".
  struct CustomRegistryInterface : public TensorFlowRegistryInterfaceBase {
    using TensorFlowRegistryInterfaceBase::TensorFlowRegistryInterfaceBase;
    bool isStateful(Operation *op) const override {
      return op->getName().stripDialect() == "Foo";
    }
  };
  registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
    dialect->addInterfaces<CustomRegistryInterface>();
  });
  context.appendDialectRegistry(registry);
  auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
  SmallVector<StringRef, 2> op_names = {"tfg.Foo", "tfg.Bar"};
  SmallVector<bool, 2> expected = {true, false};
  for (auto it : llvm::zip(op_names, expected)) {
    OperationState state(UnknownLoc::get(&context), std::get<0>(it));
    state.addTypes(dialect->getControlType());
    Operation *op = Operation::create(state);
    auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
    auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
    ASSERT_TRUE(iface);
    EXPECT_EQ(iface.isStateful(), std::get<1>(it));
  }
}
}
}
} |
8 | #ifndef MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
#define MLIR_HLO_DIALECT_MHLO_IR_REGISTER_H_
namespace mlir {
class DialectRegistry;
namespace mhlo {
// Registers all MHLO-family dialects with `registry` (defined elsewhere).
void registerAllMhloDialects(DialectRegistry &registry);
}
}
#endif
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tflite_with_xnnpack_optional.h"
namespace tflite {
namespace ops {
namespace custom {
// Forward declarations of TFLite custom-op registrations defined elsewhere.
TfLiteRegistration* Register_NUMERIC_VERIFY();
TfLiteRegistration* Register_AUDIO_SPECTROGRAM();
TfLiteRegistration* Register_MFCC();
TfLiteRegistration* Register_DETECTION_POSTPROCESS();
}
namespace builtin {
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_ABS, Register_ABS(), 1,
5);
AddBuiltin(BuiltinOperator_HARD_SWISH, Register_HARD_SWISH());
AddBuiltin(BuiltinOperator_RELU, Register_RELU(), 1,
3);
AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU_0_TO_1, Register_RELU_0_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6(), 1,
3);
AddBuiltin(BuiltinOperator_TANH, Register_TANH(), 1,
3);
AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC(),
1,
3);
AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_MAX_POOL_2D, Register_MAX_POOL_2D(),
1,
3);
AddBuiltin(BuiltinOperator_L2_POOL_2D, Register_L2_POOL_2D());
AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(),
1,
8);
AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, Register_DEPTHWISE_CONV_2D(),
1,
7);
AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(),
1,
4);
AddBuiltin(BuiltinOperator_RNN, Register_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
Register_BIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
Register_UNIDIRECTIONAL_SEQUENCE_RNN(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP, Register_EMBEDDING_LOOKUP(),
1,
3);
AddBuiltin(BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
Register_EMBEDDING_LOOKUP_SPARSE());
AddBuiltin(BuiltinOperator_FULLY_CONNECTED, Register_FULLY_CONNECTED(),
1,
12);
AddBuiltin(BuiltinOperator_LSH_PROJECTION, Register_LSH_PROJECTION());
AddBuiltin(BuiltinOperator_HASHTABLE_LOOKUP, Register_HASHTABLE_LOOKUP());
AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(),
1,
3);
AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(),
1,
4);
AddBuiltin(BuiltinOperator_ADD, Register_ADD(),
1,
5);
AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, Register_SPACE_TO_BATCH_ND(),
1,
4);
AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, Register_BATCH_TO_SPACE_ND(),
1,
4);
AddBuiltin(BuiltinOperator_MUL, Register_MUL(), 1,
7);
AddBuiltin(BuiltinOperator_L2_NORMALIZATION, Register_L2_NORMALIZATION(),
1,
2);
AddBuiltin(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
Register_LOCAL_RESPONSE_NORMALIZATION());
AddBuiltin(BuiltinOperator_LSTM, Register_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
Register_BIDIRECTIONAL_SEQUENCE_LSTM(), 1,
3);
AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), 1,
4);
AddBuiltin(BuiltinOperator_PAD, Register_PAD(), 1,
4);
AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), 1,
4);
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
1,
4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR(),
1,
4);
AddBuiltin(BuiltinOperator_SKIP_GRAM, Register_SKIP_GRAM());
AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(),
1,
2);
AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, Register_DEPTH_TO_SPACE(),
1,
2);
AddBuiltin(BuiltinOperator_GATHER, Register_GATHER(),
1,
7);
AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(),
1,
6);
AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(),
1,
3);
AddBuiltin(BuiltinOperator_DIV, Register_DIV(),
1,
2);
AddBuiltin(BuiltinOperator_SUB, Register_SUB(),
1,
5);
AddBuiltin(BuiltinOperator_SPLIT, Register_SPLIT(),
1,
4);
AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(),
1,
2);
AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(),
1,
2);
AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(),
1,
8);
AddBuiltin(BuiltinOperator_EXP, Register_EXP(),
1,
2);
AddBuiltin(BuiltinOperator_TOPK_V2, Register_TOPK_V2(),
1,
3);
AddBuiltin(BuiltinOperator_LOG, Register_LOG(),
1,
2);
AddBuiltin(BuiltinOperator_LOG_SOFTMAX, Register_LOG_SOFTMAX(),
1,
2);
AddBuiltin(BuiltinOperator_CAST, Register_CAST(),
1,
6);
AddBuiltin(BuiltinOperator_DEQUANTIZE, Register_DEQUANTIZE(),
1,
6);
AddBuiltin(BuiltinOperator_PRELU, Register_PRELU());
AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(),
1,
4);
AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(),
1,
2);
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_LESS, Register_LESS(),
1,
3);
AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(),
1,
2);
AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR());
AddBuiltin(BuiltinOperator_CEIL, Register_CEIL());
AddBuiltin(BuiltinOperator_ROUND, Register_ROUND());
AddBuiltin(BuiltinOperator_NEG, Register_NEG());
AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(),
1,
4);
AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(),
1,
2);
AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(),
1,
6);
AddBuiltin(BuiltinOperator_SIN, Register_SIN());
AddBuiltin(BuiltinOperator_COS, Register_COS());
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV(),
1,
5);
AddBuiltin(BuiltinOperator_TILE, Register_TILE(),
1,
3);
AddBuiltin(BuiltinOperator_SUM, Register_SUM(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD(),
1,
2);
AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN(),
1,
3);
AddBuiltin(BuiltinOperator_REDUCE_ANY, Register_REDUCE_ANY());
AddBuiltin(BuiltinOperator_REDUCE_ALL, Register_REDUCE_ALL());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE(),
1,
3);
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(),
1,
4);
AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(),
1,
3);
AddBuiltin(BuiltinOperator_SQRT, Register_SQRT());
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT(),
1,
3);
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_RANK, Register_RANK());
AddBuiltin(BuiltinOperator_POW, Register_POW());
AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
AddBuiltin(BuiltinOperator_PACK, Register_PACK(),
1,
4);
AddBuiltin(BuiltinOperator_ONE_HOT, Register_ONE_HOT());
AddBuiltin(BuiltinOperator_LOGICAL_OR, Register_LOGICAL_OR());
AddBuiltin(BuiltinOperator_LOGICAL_AND, Register_LOGICAL_AND());
AddBuiltin(BuiltinOperator_LOGICAL_NOT, Register_LOGICAL_NOT());
AddBuiltin(BuiltinOperator_UNPACK, Register_UNPACK(),
1,
4);
AddBuiltin(BuiltinOperator_FLOOR_DIV, Register_FLOOR_DIV(),
1,
3);
AddBuiltin(BuiltinOperator_SQUARE, Register_SQUARE());
AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
AddBuiltin(BuiltinOperator_FLOOR_MOD, Register_FLOOR_MOD(),
1,
2);
AddBuiltin(BuiltinOperator_RANGE, Register_RANGE(),
1,
2);
AddBuiltin(BuiltinOperator_LEAKY_RELU, Register_LEAKY_RELU(),
1,
2);
AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, Register_SQUARED_DIFFERENCE(),
1,
2);
AddBuiltin(BuiltinOperator_FILL, Register_FILL(),
1,
4);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
1,
3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
1,
3);
AddBuiltin(BuiltinOperator_ADD_N, Register_ADD_N());
AddBuiltin(BuiltinOperator_GATHER_ND, Register_GATHER_ND(),
1,
5);
AddBuiltin(BuiltinOperator_WHERE, Register_WHERE(),
1,
2);
AddBuiltin(BuiltinOperator_ELU, Register_ELU());
AddBuiltin(BuiltinOperator_REVERSE_SEQUENCE, Register_REVERSE_SEQUENCE());
AddBuiltin(BuiltinOperator_MATRIX_DIAG, Register_MATRIX_DIAG());
AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
1,
3);
AddBuiltin(BuiltinOperator_MATRIX_SET_DIAG, Register_MATRIX_SET_DIAG());
AddBuiltin(BuiltinOperator_IF, tflite::ops::builtin::Register_IF());
AddBuiltin(BuiltinOperator_WHILE, tflite::ops::builtin::Register_WHILE());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V4,
Register_NON_MAX_SUPPRESSION_V4());
AddBuiltin(BuiltinOperator_NON_MAX_SUPPRESSION_V5,
Register_NON_MAX_SUPPRESSION_V5());
AddBuiltin(BuiltinOperator_SCATTER_ND, Register_SCATTER_ND());
AddBuiltin(BuiltinOperator_DENSIFY, Register_DENSIFY());
AddBuiltin(BuiltinOperator_SEGMENT_SUM, Register_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(),
1,
4);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(),
2,
3);
AddBuiltin(BuiltinOperator_CALL_ONCE,
tflite::ops::builtin::Register_CALL_ONCE());
AddBuiltin(BuiltinOperator_RFFT2D, Register_RFFT2D());
AddBuiltin(BuiltinOperator_CONV_3D, Register_CONV_3D());
AddBuiltin(BuiltinOperator_IMAG, Register_IMAG());
AddBuiltin(BuiltinOperator_REAL, Register_REAL());
AddBuiltin(BuiltinOperator_COMPLEX_ABS, Register_COMPLEX_ABS());
AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS());
AddBuiltin(BuiltinOperator_HASHTABLE, Register_HASHTABLE());
AddBuiltin(BuiltinOperator_HASHTABLE_FIND, Register_HASHTABLE_FIND());
AddBuiltin(BuiltinOperator_HASHTABLE_IMPORT, Register_HASHTABLE_IMPORT());
AddBuiltin(BuiltinOperator_HASHTABLE_SIZE, Register_HASHTABLE_SIZE());
AddBuiltin(BuiltinOperator_CONV_3D_TRANSPOSE, Register_CONV_3D_TRANSPOSE());
AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE());
AddBuiltin(BuiltinOperator_READ_VARIABLE, Register_READ_VARIABLE());
AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, Register_ASSIGN_VARIABLE());
AddBuiltin(BuiltinOperator_MULTINOMIAL, Register_MULTINOMIAL());
AddBuiltin(BuiltinOperator_RANDOM_STANDARD_NORMAL,
Register_RANDOM_STANDARD_NORMAL());
AddBuiltin(BuiltinOperator_BUCKETIZE, Register_BUCKETIZE());
AddBuiltin(BuiltinOperator_RANDOM_UNIFORM, Register_RANDOM_UNIFORM());
AddBuiltin(BuiltinOperator_GELU, Register_GELU(),
1,
2);
AddBuiltin(BuiltinOperator_DYNAMIC_UPDATE_SLICE,
Register_DYNAMIC_UPDATE_SLICE(),
1,
2);
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_PROD,
Register_UNSORTED_SEGMENT_PROD());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MAX,
Register_UNSORTED_SEGMENT_MAX());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_MIN,
Register_UNSORTED_SEGMENT_MIN());
AddBuiltin(BuiltinOperator_UNSORTED_SEGMENT_SUM,
Register_UNSORTED_SEGMENT_SUM());
AddBuiltin(BuiltinOperator_ATAN2, Register_ATAN2());
AddBuiltin(BuiltinOperator_SIGN, Register_SIGN(),
1,
2);
AddBuiltin(BuiltinOperator_BITCAST, Register_BITCAST());
AddBuiltin(BuiltinOperator_BITWISE_XOR, Register_BITWISE_XOR());
AddBuiltin(BuiltinOperator_RIGHT_SHIFT, Register_RIGHT_SHIFT());
AddBuiltin(BuiltinOperator_STABLEHLO_SCATTER, Register_STABLEHLO_SCATTER());
AddBuiltin(BuiltinOperator_DILATE, Register_DILATE());
AddBuiltin(BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR,
Register_STABLEHLO_RNG_BIT_GENERATOR());
AddBuiltin(BuiltinOperator_REDUCE_WINDOW, Register_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
Register_STABLEHLO_REDUCE_WINDOW());
AddBuiltin(BuiltinOperator_STABLEHLO_GATHER, Register_STABLEHLO_GATHER());
AddBuiltin(BuiltinOperator_STABLEHLO_ADD, Register_STABLEHLO_ADD());
AddBuiltin(BuiltinOperator_STABLEHLO_MULTIPLY, Register_STABLEHLO_MULTIPLY());
AddBuiltin(BuiltinOperator_STABLEHLO_MAXIMUM, Register_STABLEHLO_MAXIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_MINIMUM, Register_STABLEHLO_MINIMUM());
AddBuiltin(BuiltinOperator_STABLEHLO_PAD, Register_STABLEHLO_PAD());
AddBuiltin(BuiltinOperator_STABLEHLO_COMPOSITE,
Register_STABLEHLO_COMPOSITE());
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
AddCustom("Mfcc", tflite::ops::custom::Register_MFCC());
AddCustom("AudioSpectrogram",
tflite::ops::custom::Register_AUDIO_SPECTROGRAM());
AddCustom("TFLite_Detection_PostProcess",
tflite::ops::custom::Register_DETECTION_POSTPROCESS());
may_directly_contain_user_defined_ops_ = false;
delegate_creators_.push_back([](TfLiteContext* context) {
return tflite::MaybeCreateXNNPACKDelegate(context,
XNNPackQS8Options::default_value);
});
}
// Constructs a resolver whose single delegate creator builds the XNNPACK
// delegate with signed-8-bit quantization (QS8) support switched on or off
// according to `enable_xnnpack_unsigned_quantized`.
BuiltinOpResolverWithXNNPACK::BuiltinOpResolverWithXNNPACK(
    bool enable_xnnpack_unsigned_quantized) {
  // Drop any creator installed by the base-class constructor; we install our
  // own below with the caller-selected QS8 option.
  delegate_creators_.clear();
  XNNPackQS8Options qs8_options = XNNPackQS8Options::disabled;
  if (enable_xnnpack_unsigned_quantized) {
    qs8_options = XNNPackQS8Options::enabled;
  }
  delegate_creators_.push_back([qs8_options](TfLiteContext* context) {
    return tflite::MaybeCreateXNNPACKDelegate(context, qs8_options);
  });
}
}
}
} | #include "tensorflow/lite/core/kernels/register.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite::ops::builtin {
namespace {
// Verifies that the resolver registers ADD (version 1) with a fully
// populated TfLiteRegistration (init/free/prepare/invoke all non-null).
TEST(BuiltinOpResolverTest, SupportsAdd) {
  BuiltinOpResolver builtin_op_resolver;
  const TfLiteRegistration *add =
      builtin_op_resolver.FindOp(::tflite::BuiltinOperator_ADD, 1);
  ASSERT_NE(add, nullptr);
  ASSERT_NE(add->init, nullptr);
  ASSERT_NE(add->free, nullptr);
  ASSERT_NE(add->prepare, nullptr);
  ASSERT_NE(add->invoke, nullptr);
}
// Verifies that a MutableOpResolver copy of the builtin resolver preserves
// the ADD registration (i.e. copying does not lose registered ops).
TEST(BuiltinOpResolverTest, CopySupportsAdd) {
  BuiltinOpResolver builtin_op_resolver;
  MutableOpResolver copy = builtin_op_resolver;
  const TfLiteRegistration *add = copy.FindOp(::tflite::BuiltinOperator_ADD, 1);
  ASSERT_NE(add, nullptr);
  ASSERT_NE(add->init, nullptr);
  ASSERT_NE(add->free, nullptr);
  ASSERT_NE(add->prepare, nullptr);
  ASSERT_NE(add->invoke, nullptr);
}
#if defined(TFLITE_WITHOUT_XNNPACK)
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, HasXNNPACKDelegate_QS8_QU8) {
BuiltinOpResolver builtin_op_resolver;
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8,
TFLITE_XNNPACK_DELEGATE_FLAG_QU8);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
TEST(BuiltinOpResolverTest, Disable_QU8) {
BuiltinOpResolverWithXNNPACK builtin_op_resolver(false);
ASSERT_EQ(builtin_op_resolver.GetDelegateCreators().size(), 1);
BuiltinOpResolver::TfLiteDelegateCreator delegate_creator =
builtin_op_resolver.GetDelegateCreators()[0];
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)> delegate =
delegate_creator(nullptr);
const TfLiteXNNPackDelegateOptions *options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QU8, 0);
ASSERT_EQ(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_QS8,
TFLITE_XNNPACK_DELEGATE_FLAG_QS8);
}
#endif
}
} |
9 | #ifndef ABSL_SYNCHRONIZATION_BARRIER_H_
#define ABSL_SYNCHRONIZATION_BARRIER_H_
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// A one-shot synchronization barrier: blocks a fixed number of threads until
// all of them have arrived, then releases them together.  Block() returns
// true for exactly one caller, which may then safely destroy the Barrier.
class Barrier {
 public:
  // `num_threads` is the number of threads that must call Block() before any
  // of them is released.
  explicit Barrier(int num_threads)
      : num_to_block_(num_threads), num_to_exit_(num_threads) {}
  Barrier(const Barrier&) = delete;
  Barrier& operator=(const Barrier&) = delete;
  // Blocks until all `num_threads` threads have called Block(); see class
  // comment for the meaning of the return value.
  bool Block();
 private:
  Mutex lock_;
  int num_to_block_ ABSL_GUARDED_BY(lock_);  // threads yet to arrive
  int num_to_exit_ ABSL_GUARDED_BY(lock_);   // threads yet to leave Block()
};
ABSL_NAMESPACE_END
}
#endif
#include "absl/synchronization/barrier.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Condition callback for Mutex::Await: true once the pointed-to int counter
// has reached zero.
static bool IsZero(void *arg) {
  const int count = *static_cast<int *>(arg);
  return count == 0;
}
// Registers the calling thread's arrival, waits until all threads have
// arrived, and returns true for exactly one caller: the last thread to
// leave (num_to_exit_ reaches 0).  That caller may delete the Barrier.
bool Barrier::Block() {
  MutexLock l(&this->lock_);
  this->num_to_block_--;
  if (this->num_to_block_ < 0) {
    // More threads called Block() than the Barrier was constructed for.
    ABSL_RAW_LOG(
        FATAL,
        "Block() called too many times. num_to_block_=%d out of total=%d",
        this->num_to_block_, this->num_to_exit_);
  }
  // Releases lock_ while waiting; wakes when every thread has decremented
  // num_to_block_ to zero.
  this->lock_.Await(Condition(IsZero, &this->num_to_block_));
  // Count this thread out; only the final exiting thread returns true.
  this->num_to_exit_--;
  ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
  return this->num_to_exit_ == 0;
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/barrier.h"
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
// End-to-end check: with kNumThreads participants, no thread passes the
// barrier until the last one arrives, and exactly one thread (the one whose
// Block() returns true) deletes the Barrier.
TEST(Barrier, SanityTest) {
  constexpr int kNumThreads = 10;
  absl::Barrier* barrier = new absl::Barrier(kNumThreads);
  absl::Mutex mutex;
  int counter = 0;
  auto thread_func = [&] {
    if (barrier->Block()) {
      // This thread is the last to exit; it owns the cleanup.
      delete barrier;
    }
    absl::MutexLock lock(&mutex);
    ++counter;
  };
  std::vector<std::thread> threads;
  // Start all but one thread; they must all stay blocked in Block().
  for (int i = 0; i < kNumThreads - 1; ++i) {
    threads.push_back(std::thread(thread_func));
  }
  // Give the threads ample time to reach the barrier before checking that
  // none of them has passed it (counter still 0).
  absl::SleepFor(absl::Seconds(1));
  {
    absl::MutexLock lock(&mutex);
    EXPECT_EQ(counter, 0);
  }
  // The final arrival releases everyone.
  threads.push_back(std::thread(thread_func));
  for (auto& thread : threads) {
    thread.join();
  }
  absl::MutexLock lock(&mutex);
  EXPECT_EQ(counter, kNumThreads);
}
10 | #ifndef TENSORFLOW_CORE_LIB_CORE_ARENA_H_
#define TENSORFLOW_CORE_LIB_CORE_ARENA_H_
#include <assert.h>
#include <vector>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace core {
// A simple region/arena allocator: carves allocations out of fixed-size
// blocks obtained from port::AlignedMalloc.  Individual allocations cannot
// be freed; all memory is reclaimed at once by Reset() or the destructor.
// Not thread-safe (no internal synchronization is visible).
class Arena {
 public:
  // `block_size` is the size of each internal block; must exceed
  // kDefaultAlignment (checked in the constructor).
  explicit Arena(const size_t block_size);
  ~Arena();
  // Returns `size` bytes with no alignment guarantee beyond what the
  // current free pointer happens to have (align == 1 fast path).
  char* Alloc(const size_t size) {
    return reinterpret_cast<char*>(GetMemory(size, 1));
  }
  // Returns `size` bytes aligned to `alignment` (power of two).
  char* AllocAligned(const size_t size, const size_t alignment) {
    return reinterpret_cast<char*>(GetMemory(size, alignment));
  }
  // Frees every block except the first and rewinds to an empty arena.
  void Reset();
#ifdef __i386__
  static const int kDefaultAlignment = 4;
#else
  static constexpr int kDefaultAlignment = 8;
#endif
 protected:
  // Advances freestart_ to the next `alignment` boundary if enough space
  // remains in the current block; false if it doesn't fit.
  bool SatisfyAlignment(const size_t alignment);
  // Allocates a fresh block of block_size_ and makes it current.
  void MakeNewBlock(const uint32 alignment);
  // Slow path for GetMemory: handles alignment, block exhaustion, and
  // oversized requests.
  void* GetMemoryFallback(const size_t size, const int align);
  // Fast path: bump-pointer allocation when the request is unaligned and
  // fits in the current block.
  void* GetMemory(const size_t size, const int align) {
    assert(remaining_ <= block_size_);
    if (size > 0 && size < remaining_ && align == 1) {
      void* result = freestart_;
      freestart_ += size;
      remaining_ -= size;
      return result;
    }
    return GetMemoryFallback(size, align);
  }
  size_t remaining_;  // bytes left in the current block
 private:
  // One underlying allocation: base pointer plus its (possibly rounded-up)
  // size.
  struct AllocatedBlock {
    char* mem;
    size_t size;
  };
  // Allocates a block of at least `block_size` bytes aligned to `alignment`
  // and records it for later freeing.
  AllocatedBlock* AllocNewBlock(const size_t block_size,
                                const uint32 alignment);
  const size_t block_size_;
  char* freestart_;             // next allocation position in current block
  char* freestart_when_empty_;  // freestart_ value right after Reset()
  size_t blocks_alloced_;       // number of first_blocks_ entries in use
  AllocatedBlock first_blocks_[16];  // inline bookkeeping before overflow
  // Lazily created once first_blocks_ is exhausted.
  std::vector<AllocatedBlock>* overflow_blocks_;
  // Frees all blocks except first_blocks_[0].
  void FreeBlocks();
  Arena(const Arena&) = delete;
  void operator=(const Arena&) = delete;
};
}
}
#endif
#include "tensorflow/core/lib/core/arena.h"
#include <assert.h>
#include <algorithm>
#include <vector>
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace core {
// Allocates the first block eagerly (aligned to sizeof(void*)) and
// initializes the bump pointer via Reset().
Arena::Arena(const size_t block_size)
    : remaining_(0),
      block_size_(block_size),
      freestart_(nullptr),  // set for real in Reset()
      blocks_alloced_(1),
      overflow_blocks_(nullptr) {
  // A block must be able to hold at least one default-aligned allocation.
  assert(block_size > kDefaultAlignment);
  first_blocks_[0].mem =
      reinterpret_cast<char*>(port::AlignedMalloc(block_size_, sizeof(void*)));
  first_blocks_[0].size = block_size_;
  Reset();
}
// Releases every allocation owned by the arena.  FreeBlocks() disposes of
// everything except the inline blocks it intentionally keeps, which are
// freed by the loop below.
Arena::~Arena() {
  FreeBlocks();
  assert(overflow_blocks_ == nullptr);  // FreeBlocks() must have deleted it
  for (size_t i = 0; i < blocks_alloced_; ++i) {
    port::AlignedFree(first_blocks_[i].mem);
  }
}
// Advances freestart_ so it is aligned to `alignment` (must be a power of
// two).  Returns false when the padding needed would consume all remaining
// space in the current block, leaving state unchanged in that case.
bool Arena::SatisfyAlignment(size_t alignment) {
  // Distance past the previous alignment boundary.
  const size_t overage = reinterpret_cast<size_t>(freestart_) & (alignment - 1);
  if (overage > 0) {
    const size_t waste = alignment - overage;
    if (waste >= remaining_) {
      return false;
    }
    freestart_ += waste;
    remaining_ -= waste;
  }
  DCHECK_EQ(size_t{0}, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
  return true;
}
// Returns the arena to its freshly-constructed state: drops every block
// except the first and rewinds the bump pointer to its start (default
// aligned).
void Arena::Reset() {
  FreeBlocks();
  freestart_ = first_blocks_[0].mem;
  remaining_ = first_blocks_[0].size;
  // Must succeed: the constructor asserts block_size > kDefaultAlignment.
  CHECK(SatisfyAlignment(kDefaultAlignment));
  freestart_when_empty_ = freestart_;
}
// Allocates a fresh block of block_size_ bytes, makes it the current
// allocation target, and aligns the bump pointer within it.
void Arena::MakeNewBlock(const uint32 alignment) {
  AllocatedBlock* block = AllocNewBlock(block_size_, alignment);
  freestart_ = block->mem;
  remaining_ = block->size;
  // Cannot fail: AllocNewBlock returns memory already aligned to (a multiple
  // of) `alignment`.
  CHECK(SatisfyAlignment(alignment));
}
// Returns the least common multiple of `a` and `b`.
//
// Simplified: GCD is symmetric in its arguments, so the original three-way
// branch on a <=> b was redundant — one expression covers all cases,
// including a == b (gcd(a, a) == a, so the result is a).  Dividing before
// multiplying keeps the intermediate value small to reduce overflow risk.
static uint32 LeastCommonMultiple(uint32 a, uint32 b) {
  return (a / MathUtil::GCD<uint32>(a, b)) * b;
}
// Allocates and records a new block of at least `block_size` bytes whose
// base address satisfies `alignment`.  Bookkeeping lives in the inline
// first_blocks_ array until it fills, then in heap-allocated
// overflow_blocks_.
Arena::AllocatedBlock* Arena::AllocNewBlock(const size_t block_size,
                                            const uint32 alignment) {
  AllocatedBlock* block;
  if (blocks_alloced_ < TF_ARRAYSIZE(first_blocks_)) {
    block = &first_blocks_[blocks_alloced_++];
  } else {
    // Inline slots exhausted; spill bookkeeping to the overflow vector.
    if (overflow_blocks_ == nullptr)
      overflow_blocks_ = new std::vector<AllocatedBlock>;
    overflow_blocks_->resize(overflow_blocks_->size() + 1);
    block = &overflow_blocks_->back();
  }
  // The block must satisfy both the caller's alignment and the arena's
  // default alignment simultaneously, hence the LCM; it must also be at
  // least pointer-aligned for port::AlignedMalloc.
  uint32 adjusted_alignment =
      (alignment > 1 ? LeastCommonMultiple(alignment, kDefaultAlignment) : 1);
  adjusted_alignment =
      std::max(adjusted_alignment, static_cast<uint32>(sizeof(void*)));
  CHECK_LE(adjusted_alignment, static_cast<uint32>(1 << 20))
      << "Alignment on boundaries greater than 1MB not supported.";
  // Round the block size up to a multiple of the alignment so consecutive
  // carve-outs can stay aligned.
  size_t adjusted_block_size = block_size;
  if (adjusted_block_size > adjusted_alignment) {
    const uint32 excess = adjusted_block_size % adjusted_alignment;
    adjusted_block_size += (excess > 0 ? adjusted_alignment - excess : 0);
  }
  block->mem = reinterpret_cast<char*>(
      port::AlignedMalloc(adjusted_block_size, adjusted_alignment));
  block->size = adjusted_block_size;
  CHECK(nullptr != block->mem) << "block_size=" << block_size
                               << " adjusted_block_size=" << adjusted_block_size
                               << " alignment=" << alignment
                               << " adjusted_alignment=" << adjusted_alignment;
  return block;
}
// Slow path behind GetMemory(): handles zero-size requests, aligned
// requests, requests that don't fit in the current block, and oversized
// requests that get a dedicated block.
void* Arena::GetMemoryFallback(const size_t size, const int alignment) {
  if (0 == size) {
    return nullptr;
  }
  // Alignment must be a positive power of two.
  CHECK(alignment > 0 && 0 == (alignment & (alignment - 1)));
  // Requests larger than a quarter block get their own block so normal
  // blocks aren't wasted on a single large allocation.
  if (block_size_ == 0 || size > block_size_ / 4) {
    return AllocNewBlock(size, alignment)->mem;
  }
  // Start a new block if the current one can't align or can't fit the
  // request.
  if (!SatisfyAlignment(alignment) || size > remaining_) {
    MakeNewBlock(alignment);
  }
  CHECK_LE(size, remaining_);
  remaining_ -= size;
  void* result = freestart_;
  freestart_ += size;
  return result;
}
void Arena::FreeBlocks() {
for (size_t i = 1; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
first_blocks_[i].mem = nullptr;
first_blocks_[i].size = 0;
}
blocks_alloced_ = 1;
if (overflow_blocks_ != nullptr) {
std::vector<AllocatedBlock>::iterator it;
for (it = overflow_blocks_->begin(); it != overflow_blocks_->end(); ++it) {
port::AlignedFree(it->mem);
}
delete overflow_blocks_;
overflow_blocks_ = nullptr;
}
}
}
} | #include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace {
// Writes distinct patterns over `mem` before, between, and after a burst of
// unrelated heap allocations, so that overlap between arena memory and the
// regular heap (or a prematurely freed block) would surface as corruption.
static void TestMemory(void* mem, int size) {
  memset(mem, 0xaa, size);
  // Churn the regular allocator while the arena memory is live.
  char* tmp[100];
  for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
    tmp[i] = new char[i * i + 1];
  }
  memset(mem, 0xcc, size);
  for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
    delete[] tmp[i];
  }
  memset(mem, 0xee, size);
}
// Two back-to-back allocations that fit in one block must both succeed and
// be independently writable.
TEST(ArenaTest, TestBasicArena) {
  Arena a(1024);
  char* memory = a.Alloc(100);
  ASSERT_NE(memory, nullptr);
  TestMemory(memory, 100);
  memory = a.Alloc(100);
  ASSERT_NE(memory, nullptr);
  TestMemory(memory, 100);
}
// Pins down the exact placement of successive aligned allocations (padding
// inserted to reach 8-byte boundaries between odd-sized requests).
TEST(ArenaTest, TestAlignment) {
  Arena a(1024);
  char* byte0 = a.Alloc(1);
  char* alloc_aligned8 = a.AllocAligned(17, 8);
  EXPECT_EQ(alloc_aligned8 - byte0, 8);
  char* alloc_aligned8_b = a.AllocAligned(8, 8);
  EXPECT_EQ(alloc_aligned8_b - alloc_aligned8, 24);
  char* alloc_aligned8_c = a.AllocAligned(16, 8);
  EXPECT_EQ(alloc_aligned8_c - alloc_aligned8_b, 8);
  char* alloc_aligned8_d = a.AllocAligned(8, 1);
  EXPECT_EQ(alloc_aligned8_d - alloc_aligned8_c, 16);
}
// Exercises allocations at, near, and far beyond the block size: exactly
// block-sized, three-quarter-block (forcing a second block), and 10x the
// block size (forcing a dedicated oversized block).
TEST(ArenaTest, TestVariousArenaSizes) {
  {
    // Allocate the full block size twice.
    Arena a(1024);
    char* memory = a.Alloc(1024);
    ASSERT_NE(memory, nullptr);
    TestMemory(memory, 1024);
    char* memory2 = a.Alloc(1024);
    ASSERT_NE(memory2, nullptr);
    TestMemory(memory2, 1024);
  }
  {
    // Two 768-byte requests cannot share one 1024-byte block.
    Arena a(1024);
    char* memory = a.Alloc(768);
    ASSERT_NE(memory, nullptr);
    TestMemory(memory, 768);
    char* memory2 = a.Alloc(768);
    ASSERT_NE(memory2, nullptr);
    TestMemory(memory2, 768);
  }
  {
    // Request much larger than the block size gets its own block.
    Arena a(1024);
    char* memory = a.Alloc(10240);
    ASSERT_NE(memory, nullptr);
    TestMemory(memory, 10240);
    char* memory2 = a.Alloc(1234);
    ASSERT_NE(memory2, nullptr);
    TestMemory(memory2, 1234);
  }
}
}
}
} |
11 | #ifndef ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_
#include <atomic>
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/create_thread_identity.h"
#include "absl/synchronization/internal/kernel_timeout.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class Mutex;
namespace synchronization_internal {
// Static-only facade over the per-thread semaphore embedded in each
// ThreadIdentity.  Post/Wait are private and reachable only by Mutex and
// the thread-identity bootstrap; tests and tools use the public counters.
class PerThreadSem {
 public:
  PerThreadSem() = delete;
  PerThreadSem(const PerThreadSem&) = delete;
  PerThreadSem& operator=(const PerThreadSem&) = delete;
  // Periodic timer hook; may poke a thread that has been blocked too long
  // (see the implementation for the idle-period heuristic).
  static void Tick(base_internal::ThreadIdentity* identity);
  // Installs a counter that wait/wake code increments while the calling
  // thread is blocked; useful for tests that observe blocking.
  static void SetThreadBlockedCounter(std::atomic<int> *counter);
  static std::atomic<int> *GetThreadBlockedCounter();
 private:
  // Thin forwarding wrappers over the AbslInternalPerThreadSem* C symbols;
  // bodies are defined out of line below the class.
  static inline void Init(base_internal::ThreadIdentity* identity);
  static inline void Post(base_internal::ThreadIdentity* identity);
  static inline bool Wait(KernelTimeout t);
  friend class PerThreadSemTest;
  friend class absl::Mutex;
  friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*);
};
}
ABSL_NAMESPACE_END
}
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
absl::base_internal::ThreadIdentity* identity);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
absl::base_internal::ThreadIdentity* identity);
}
// Forwards to the (weak, overridable) C entry point that constructs the
// per-thread waiter state.
void absl::synchronization_internal::PerThreadSem::Init(
    absl::base_internal::ThreadIdentity* identity) {
  ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
}
// Forwards to the C entry point that wakes `identity`'s semaphore.
void absl::synchronization_internal::PerThreadSem::Post(
    absl::base_internal::ThreadIdentity* identity) {
  ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
}
// Forwards to the C entry point; returns true if woken by a Post, false on
// timeout (matching the underlying symbol's contract).
bool absl::synchronization_internal::PerThreadSem::Wait(
    absl::synchronization_internal::KernelTimeout t) {
  return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t);
}
#endif
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/waiter.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// Attaches `counter` to the calling thread's identity; the wait path
// increments/decrements it around blocking so observers can tell when the
// thread is parked.
void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
  base_internal::ThreadIdentity *const identity =
      GetOrCreateCurrentThreadIdentity();
  identity->blocked_count_ptr = counter;
}
// Returns the calling thread's blocked counter, or null if none was set.
std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
  return GetOrCreateCurrentThreadIdentity()->blocked_count_ptr;
}
// Called periodically for each thread.  Advances the thread's ticker and,
// if the thread has been waiting for more than Waiter::kIdlePeriods ticks
// without being marked idle, pokes it so it can re-evaluate its wait.
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
  const int ticker =
      identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
  // wait_start == 0 means "not currently waiting" (the wait path stores a
  // nonzero ticker value on entry).
  const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
  const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
  if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
    ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
  }
}
}
ABSL_NAMESPACE_END
}
extern "C" {
// Default (weak, overridable) implementation: placement-constructs the
// per-thread Waiter in the storage embedded in `identity`.
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
    absl::base_internal::ThreadIdentity *identity) {
  new (absl::synchronization_internal::Waiter::GetWaiter(identity))
      absl::synchronization_internal::Waiter();
}
// Default implementation: wake `identity`'s waiter.
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
    absl::base_internal::ThreadIdentity *identity) {
  absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
// Default implementation: poke `identity`'s waiter (used by Tick to nudge
// long-blocked threads).
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
    absl::base_internal::ThreadIdentity *identity) {
  absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
}
// Default implementation of the blocking wait.  Returns true if woken by a
// Post, false on timeout.  Also maintains the bookkeeping that Tick() and
// the blocked-counter feature rely on.
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
    absl::synchronization_internal::KernelTimeout t) {
  bool timeout = false;
  absl::base_internal::ThreadIdentity *identity;
  identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
  // Record the ticker at the start of the wait; 0 is reserved to mean "not
  // waiting", so substitute 1 when the ticker is exactly 0.
  int ticker = identity->ticker.load(std::memory_order_relaxed);
  identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
  identity->is_idle.store(false, std::memory_order_relaxed);
  if (identity->blocked_count_ptr != nullptr) {
    // Announce the transition to blocked for observers (tests).
    identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
  }
  timeout =
      !absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
  if (identity->blocked_count_ptr != nullptr) {
    identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
  }
  identity->is_idle.store(false, std::memory_order_relaxed);
  // Clear wait_start so Tick() stops considering this thread as waiting.
  identity->wait_start.store(0, std::memory_order_relaxed);
  return !timeout;
}
}
#endif | #include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include <condition_variable>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <thread>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
// A minimal counting semaphore built on std::mutex/std::condition_variable,
// used by the tests below to sequence threads.
class SimpleSemaphore {
 public:
  SimpleSemaphore() : count_(0) {}

  // Blocks until the count is positive, then decrements it.
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return count_ > 0; });
    count_--;
    // Wake another waiter in case the count is still positive.
    cv_.notify_one();
  }

  // Increments the count and wakes one waiter.
  void Post() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      count_++;
    }
    cv_.notify_one();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int count_;
};
// Shared state between the main test thread and TimingThread.
struct ThreadData {
  int num_iterations;                 // ping-pong rounds remaining
  SimpleSemaphore identity2_written;  // signals identity2 is valid
  base_internal::ThreadIdentity *identity1;  // main thread's identity
  base_internal::ThreadIdentity *identity2;  // partner thread's identity
  KernelTimeout timeout;              // timeout used for each Wait
};
// Test fixture; as a friend of PerThreadSem it can reach the private
// Post/Wait/Tick entry points, which it re-exposes to the TESTs below.
class PerThreadSemTest : public testing::Test {
 public:
  // Partner thread: publishes its identity, then ping-pongs with the main
  // thread (wait for a Post, answer with a Post) num_iterations times.
  static void TimingThread(ThreadData* t) {
    t->identity2 = GetOrCreateCurrentThreadIdentity();
    t->identity2_written.Post();
    while (t->num_iterations--) {
      Wait(t->timeout);
      Post(t->identity1);
    }
  }
  // Measures round-trip Post/Wait latency against a partner thread and
  // prints min/avg cycle counts (informational; no assertions).
  void TestTiming(const char *msg, bool timeout) {
    static const int kNumIterations = 100;
    ThreadData t;
    t.num_iterations = kNumIterations;
    // A 10000s deadline never actually fires; it only exercises the
    // timed-wait code path.
    t.timeout = timeout ?
        KernelTimeout(absl::Now() + absl::Seconds(10000))
        : KernelTimeout::Never();
    t.identity1 = GetOrCreateCurrentThreadIdentity();
    std::thread partner_thread(std::bind(TimingThread, &t));
    t.identity2_written.Wait();  // ensure identity2 is safe to use
    int64_t min_cycles = std::numeric_limits<int64_t>::max();
    int64_t total_cycles = 0;
    for (int i = 0; i < kNumIterations; ++i) {
      absl::SleepFor(absl::Milliseconds(20));
      int64_t cycles = base_internal::CycleClock::Now();
      Post(t.identity2);
      Wait(t.timeout);
      cycles = base_internal::CycleClock::Now() - cycles;
      min_cycles = std::min(min_cycles, cycles);
      total_cycles += cycles;
    }
    std::string out = StrCat(
        msg, "min cycle count=", min_cycles, " avg cycle count=",
        absl::SixDigits(static_cast<double>(total_cycles) / kNumIterations));
    printf("%s\n", out.c_str());
    partner_thread.join();
  }
 protected:
  // Friend-access shims over PerThreadSem's private static members.
  static void Post(base_internal::ThreadIdentity *id) {
    PerThreadSem::Post(id);
  }
  static bool Wait(KernelTimeout t) {
    return PerThreadSem::Wait(t);
  }
  static bool Wait(absl::Time t) {
    return Wait(KernelTimeout(t));
  }
  static void Tick(base_internal::ThreadIdentity *identity) {
    PerThreadSem::Tick(identity);
  }
};
namespace {
// Latency measurement using an untimed (Never) wait.
TEST_F(PerThreadSemTest, WithoutTimeout) {
  PerThreadSemTest::TestTiming("Without timeout: ", false);
}
// Latency measurement using a far-future timed wait.
TEST_F(PerThreadSemTest, WithTimeout) {
  PerThreadSemTest::TestTiming("With timeout: ", true);
}
// Checks the timeout semantics of Wait: a short deadline expires no earlier
// than requested (modulo scheduler slop), an already-past deadline returns
// immediately, and a pending Post lets even a past-deadline Wait succeed.
TEST_F(PerThreadSemTest, Timeouts) {
  const absl::Duration delay = absl::Milliseconds(50);
  const absl::Time start = absl::Now();
  EXPECT_FALSE(Wait(start + delay));  // no Post pending -> times out
  const absl::Duration elapsed = absl::Now() - start;
  // Allow a small early wakeup tolerance; Windows timers are coarser.
  absl::Duration slop = absl::Milliseconds(1);
#ifdef _MSC_VER
  slop = absl::Milliseconds(16);
#endif
  EXPECT_LE(delay - slop, elapsed)
      << "Wait returned " << delay - elapsed
      << " early (with " << slop << " slop), start time was " << start;
  absl::Time negative_timeout = absl::UnixEpoch() - absl::Milliseconds(100);
  EXPECT_FALSE(Wait(negative_timeout));  // past deadline, no Post -> false
  EXPECT_LE(negative_timeout, absl::Now() + slop);
  // With a Post already pending, Wait succeeds despite the past deadline.
  Post(GetOrCreateCurrentThreadIdentity());
  EXPECT_TRUE(Wait(negative_timeout));
}
// Stress-creates many short-lived threads to exercise thread-identity
// allocation/reuse; passes if nothing crashes or leaks catastrophically.
TEST_F(PerThreadSemTest, ThreadIdentityReuse) {
  for (int i = 0; i < 10000; i++) {
    std::thread t([]() { GetOrCreateCurrentThreadIdentity(); });
    t.join();
  }
}
}
}
ABSL_NAMESPACE_END
} |
12 | #ifndef QUICHE_QUIC_CORE_QUIC_PACKETS_H_
#define QUICHE_QUIC_CORE_QUIC_PACKETS_H_
#include <sys/types.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/frames/quic_frame.h"
#include "quiche/quic/core/quic_ack_listener_interface.h"
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
class QuicPacket;
struct QuicPacketHeader;
QUICHE_EXPORT QuicConnectionId GetServerConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetClientConnectionIdAsRecipient(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetServerConnectionIdAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionId GetClientConnectionIdAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
const QuicPacketHeader& header, Perspective perspective);
QUICHE_EXPORT uint8_t
GetIncludedConnectionIdLength(QuicConnectionId connection_id,
QuicConnectionIdIncluded connection_id_included);
QUICHE_EXPORT uint8_t
GetIncludedDestinationConnectionIdLength(const QuicPacketHeader& header);
QUICHE_EXPORT uint8_t
GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetPacketHeaderSize(QuicTransportVersion version,
const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetPacketHeaderSize(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length);
QUICHE_EXPORT size_t GetStartOfEncryptedData(QuicTransportVersion version,
const QuicPacketHeader& header);
QUICHE_EXPORT size_t GetStartOfEncryptedData(
QuicTransportVersion version, uint8_t destination_connection_id_length,
uint8_t source_connection_id_length, bool include_version,
bool include_diversification_nonce,
QuicPacketNumberLength packet_number_length,
quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
QuicByteCount retry_token_length,
quiche::QuicheVariableLengthIntegerLength length_length);
// Parsed representation of a QUIC packet header, covering both long- and
// short-header forms.  Field notes below are inferred from the names and
// the accessor declarations above -- confirm against the framer.
struct QUICHE_EXPORT QuicPacketHeader {
  QuicPacketHeader();
  QuicPacketHeader(const QuicPacketHeader& other);
  ~QuicPacketHeader();
  QuicPacketHeader& operator=(const QuicPacketHeader& other);
  QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
                                                const QuicPacketHeader& header);
  QuicConnectionId destination_connection_id;
  QuicConnectionIdIncluded destination_connection_id_included;
  QuicConnectionId source_connection_id;
  QuicConnectionIdIncluded source_connection_id_included;
  bool reset_flag;
  bool version_flag;
  bool has_possible_stateless_reset_token;
  QuicPacketNumberLength packet_number_length;
  uint8_t type_byte;  // raw first byte of the header
  ParsedQuicVersion version;
  // Not owned; presumably points at framer-held storage -- verify lifetime.
  DiversificationNonce* nonce;
  QuicPacketNumber packet_number;
  PacketHeaderFormat form;  // long vs short header
  QuicLongHeaderType long_packet_type;
  StatelessResetToken possible_stateless_reset_token;
  quiche::QuicheVariableLengthIntegerLength retry_token_length_length;
  // Non-owning view; valid only while the underlying packet buffer lives.
  absl::string_view retry_token;
  quiche::QuicheVariableLengthIntegerLength length_length;
  QuicByteCount remaining_packet_length;
  bool operator==(const QuicPacketHeader& other) const;
  bool operator!=(const QuicPacketHeader& other) const;
};
// Fields of a Google QUIC public reset packet.
struct QUICHE_EXPORT QuicPublicResetPacket {
  QuicPublicResetPacket();
  explicit QuicPublicResetPacket(QuicConnectionId connection_id);
  QuicConnectionId connection_id;
  QuicPublicResetNonceProof nonce_proof;
  QuicSocketAddress client_address;
  // Not initialized by the constructors above.
  std::string endpoint_id;
};
// Fields of a version negotiation packet: the connection ID plus the list of
// versions advertised for that connection.
struct QUICHE_EXPORT QuicVersionNegotiationPacket {
  QuicVersionNegotiationPacket();
  explicit QuicVersionNegotiationPacket(QuicConnectionId connection_id);
  QuicVersionNegotiationPacket(const QuicVersionNegotiationPacket& other);
  ~QuicVersionNegotiationPacket();
  QuicConnectionId connection_id;
  ParsedQuicVersionVector versions;
};
// Fields of an IETF stateless reset packet: the originating header plus the
// stateless reset token.
struct QUICHE_EXPORT QuicIetfStatelessResetPacket {
  QuicIetfStatelessResetPacket();
  QuicIetfStatelessResetPacket(const QuicPacketHeader& header,
                               StatelessResetToken token);
  QuicIetfStatelessResetPacket(const QuicIetfStatelessResetPacket& other);
  ~QuicIetfStatelessResetPacket();
  QuicPacketHeader header;
  StatelessResetToken stateless_reset_token;
};
// Wrapper around a contiguous byte buffer. By default it does not own the
// buffer; the three-argument constructor can transfer ownership, in which
// case the destructor delete[]s the buffer.
class QUICHE_EXPORT QuicData {
 public:
  // Wraps |buffer| without taking ownership.
  QuicData(const char* buffer, size_t length);
  // |owns_buffer| selects whether the destructor delete[]s |buffer|.
  QuicData(const char* buffer, size_t length, bool owns_buffer);
  // Wraps the bytes viewed by |data| without taking ownership.
  QuicData(absl::string_view data);
  QuicData(const QuicData&) = delete;
  QuicData& operator=(const QuicData&) = delete;
  virtual ~QuicData();
  absl::string_view AsStringPiece() const {
    return absl::string_view(data(), length());
  }
  const char* data() const { return buffer_; }
  size_t length() const { return length_; }

 private:
  const char* buffer_;
  size_t length_;
  bool owns_buffer_;
};
// A mutable, unencrypted packet buffer together with the header-geometry
// fields needed to split it into associated data and plaintext.
class QUICHE_EXPORT QuicPacket : public QuicData {
 public:
  QuicPacket(
      char* buffer, size_t length, bool owns_buffer,
      uint8_t destination_connection_id_length,
      uint8_t source_connection_id_length, bool includes_version,
      bool includes_diversification_nonce,
      QuicPacketNumberLength packet_number_length,
      quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
      QuicByteCount retry_token_length,
      quiche::QuicheVariableLengthIntegerLength length_length);
  // Convenience overload deriving the geometry fields from |header|.
  QuicPacket(QuicTransportVersion version, char* buffer, size_t length,
             bool owns_buffer, const QuicPacketHeader& header);
  QuicPacket(const QuicPacket&) = delete;
  QuicPacket& operator=(const QuicPacket&) = delete;
  absl::string_view AssociatedData(QuicTransportVersion version) const;
  absl::string_view Plaintext(QuicTransportVersion version) const;
  char* mutable_data() { return buffer_; }

 private:
  // Same bytes as QuicData's buffer, but writable.
  char* buffer_;
  const uint8_t destination_connection_id_length_;
  const uint8_t source_connection_id_length_;
  const bool includes_version_;
  const bool includes_diversification_nonce_;
  const QuicPacketNumberLength packet_number_length_;
  const quiche::QuicheVariableLengthIntegerLength retry_token_length_length_;
  const QuicByteCount retry_token_length_;
  const quiche::QuicheVariableLengthIntegerLength length_length_;
};
// An encrypted packet buffer. Adds Clone(), which deep-copies the bytes into
// a newly owned buffer.
class QUICHE_EXPORT QuicEncryptedPacket : public QuicData {
 public:
  QuicEncryptedPacket(const char* buffer, size_t length);
  QuicEncryptedPacket(const char* buffer, size_t length, bool owns_buffer);
  QuicEncryptedPacket(absl::string_view data);
  QuicEncryptedPacket(const QuicEncryptedPacket&) = delete;
  QuicEncryptedPacket& operator=(const QuicEncryptedPacket&) = delete;
  // Returns an owning deep copy of this packet's bytes.
  std::unique_ptr<QuicEncryptedPacket> Clone() const;
  QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
                                                const QuicEncryptedPacket& s);
};
namespace test {
class QuicReceivedPacketPeer;
}
// An encrypted packet as received off the wire, annotated with receipt time,
// TTL, optional captured link-layer headers and the ECN codepoint. The
// shorter constructors delegate to the fullest one with defaults.
class QUICHE_EXPORT QuicReceivedPacket : public QuicEncryptedPacket {
 public:
  QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time);
  QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
                     bool owns_buffer);
  // NOTE(review): ttl_valid presumably gates whether |ttl| is meaningful —
  // the full constructor body is defined elsewhere; confirm there.
  QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
                     bool owns_buffer, int ttl, bool ttl_valid);
  QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
                     bool owns_buffer, int ttl, bool ttl_valid,
                     char* packet_headers, size_t headers_length,
                     bool owns_header_buffer);
  QuicReceivedPacket(const char* buffer, size_t length, QuicTime receipt_time,
                     bool owns_buffer, int ttl, bool ttl_valid,
                     char* packet_headers, size_t headers_length,
                     bool owns_header_buffer, QuicEcnCodepoint ecn_codepoint);
  ~QuicReceivedPacket();
  QuicReceivedPacket(const QuicReceivedPacket&) = delete;
  QuicReceivedPacket& operator=(const QuicReceivedPacket&) = delete;
  // Returns an owning deep copy (covariant with QuicEncryptedPacket::Clone).
  std::unique_ptr<QuicReceivedPacket> Clone() const;
  QuicTime receipt_time() const { return receipt_time_; }
  int ttl() const { return ttl_; }
  char* packet_headers() const { return packet_headers_; }
  int headers_length() const { return headers_length_; }
  QUICHE_EXPORT friend std::ostream& operator<<(std::ostream& os,
                                                const QuicReceivedPacket& s);
  QuicEcnCodepoint ecn_codepoint() const { return ecn_codepoint_; }

 private:
  friend class test::QuicReceivedPacketPeer;
  const QuicTime receipt_time_;
  int ttl_;
  // Optional captured headers; may be owned depending on owns_header_buffer_.
  char* packet_headers_;
  int headers_length_;
  bool owns_header_buffer_;
  QuicEcnCodepoint ecn_codepoint_;
};
// A fully serialized packet plus the bookkeeping the sender needs for
// retransmission and acking. Copying is disabled; use CopySerializedPacket()
// below, or move.
struct QUICHE_EXPORT SerializedPacket {
  SerializedPacket(QuicPacketNumber packet_number,
                   QuicPacketNumberLength packet_number_length,
                   const char* encrypted_buffer,
                   QuicPacketLength encrypted_length, bool has_ack,
                   bool has_stop_waiting);
  SerializedPacket(const SerializedPacket& other) = delete;
  SerializedPacket& operator=(const SerializedPacket& other) = delete;
  SerializedPacket(SerializedPacket&& other);
  ~SerializedPacket();
  // Serialized bytes; release_encrypted_buffer is the matching deleter
  // callback for them.
  const char* encrypted_buffer;
  QuicPacketLength encrypted_length;
  std::function<void(const char*)> release_encrypted_buffer;
  QuicFrames retransmittable_frames;
  QuicFrames nonretransmittable_frames;
  IsHandshake has_crypto_handshake;
  QuicPacketNumber packet_number;
  QuicPacketNumberLength packet_number_length;
  EncryptionLevel encryption_level;
  bool has_ack;
  bool has_stop_waiting;
  bool has_ack_ecn = false;
  TransmissionType transmission_type;
  QuicPacketNumber largest_acked;
  bool has_ack_frame_copy;
  bool has_ack_frequency;
  bool has_message;
  SerializedPacketFate fate;
  QuicSocketAddress peer_address;
  std::optional<QuicByteCount> bytes_not_retransmitted;
  std::optional<QuicPacketHeader> initial_header;
};
// Copies |serialized|; when |copy_buffer| is false the copy shares the
// original's encrypted_buffer pointer instead of duplicating the bytes.
QUICHE_EXPORT SerializedPacket* CopySerializedPacket(
    const SerializedPacket& serialized,
    quiche::QuicheBufferAllocator* allocator, bool copy_buffer);
// Duplicates a packet's encrypted bytes into a new[]-allocated buffer.
QUICHE_EXPORT char* CopyBuffer(const SerializedPacket& packet);
QUICHE_EXPORT char* CopyBuffer(const char* encrypted_buffer,
                               QuicPacketLength encrypted_length);
// Opaque per-packet context; derive and attach implementation-specific state.
struct QUICHE_EXPORT QuicPerPacketContext {
  virtual ~QuicPerPacketContext() {}
};
// Facts extracted while dispatching a received packet. Holds references to
// the addresses and packet, so it must not outlive them.
struct QUICHE_EXPORT ReceivedPacketInfo {
  ReceivedPacketInfo(const QuicSocketAddress& self_address,
                     const QuicSocketAddress& peer_address,
                     const QuicReceivedPacket& packet);
  ReceivedPacketInfo(const ReceivedPacketInfo& other) = default;
  ~ReceivedPacketInfo();
  std::string ToString() const;
  QUICHE_EXPORT friend std::ostream& operator<<(
      std::ostream& os, const ReceivedPacketInfo& packet_info);
  const QuicSocketAddress& self_address;
  const QuicSocketAddress& peer_address;
  const QuicReceivedPacket& packet;
  PacketHeaderFormat form;
  QuicLongHeaderType long_packet_type;
  bool version_flag;
  bool use_length_prefix;
  QuicVersionLabel version_label;
  ParsedQuicVersion version;
  QuicConnectionId destination_connection_id;
  QuicConnectionId source_connection_id;
  std::optional<absl::string_view> retry_token;
};
}
#endif
#include "quiche/quic/core/quic_packets.h"
#include <algorithm>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
// Returns the server's connection ID from the point of view of the packet's
// recipient: for a server the destination ID, for a client the source ID.
QuicConnectionId GetServerConnectionIdAsRecipient(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_SERVER
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Returns the client's connection ID from the point of view of the packet's
// recipient: for a client the destination ID, for a server the source ID.
QuicConnectionId GetClientConnectionIdAsRecipient(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Returns the server's connection ID from the sender's point of view: a
// client addresses the server via the destination ID, a server identifies
// itself via the source ID.
QuicConnectionId GetServerConnectionIdAsSender(const QuicPacketHeader& header,
                                               Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id
             : header.source_connection_id;
}
// Returns whether the server's connection ID is present on the wire, from
// the sender's point of view (mirrors GetServerConnectionIdAsSender).
QuicConnectionIdIncluded GetServerConnectionIdIncludedAsSender(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.destination_connection_id_included
             : header.source_connection_id_included;
}
// Returns the client's connection ID from the sender's point of view: a
// client identifies itself via the source ID, a server addresses the client
// via the destination ID.
QuicConnectionId GetClientConnectionIdAsSender(const QuicPacketHeader& header,
                                               Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.source_connection_id
             : header.destination_connection_id;
}
// Returns whether the client's connection ID is present on the wire, from
// the sender's point of view (mirrors GetClientConnectionIdAsSender).
QuicConnectionIdIncluded GetClientConnectionIdIncludedAsSender(
    const QuicPacketHeader& header, Perspective perspective) {
  return perspective == Perspective::IS_CLIENT
             ? header.source_connection_id_included
             : header.destination_connection_id_included;
}
// Returns the number of bytes |connection_id| occupies on the wire: its
// length when present, zero when absent.
uint8_t GetIncludedConnectionIdLength(
    QuicConnectionId connection_id,
    QuicConnectionIdIncluded connection_id_included) {
  QUICHE_DCHECK(connection_id_included == CONNECTION_ID_PRESENT ||
                connection_id_included == CONNECTION_ID_ABSENT);
  if (connection_id_included != CONNECTION_ID_PRESENT) {
    return 0;
  }
  return connection_id.length();
}
// Per-direction convenience wrappers over GetIncludedConnectionIdLength.
uint8_t GetIncludedDestinationConnectionIdLength(
    const QuicPacketHeader& header) {
  return GetIncludedConnectionIdLength(
      header.destination_connection_id,
      header.destination_connection_id_included);
}
uint8_t GetIncludedSourceConnectionIdLength(const QuicPacketHeader& header) {
  return GetIncludedConnectionIdLength(header.source_connection_id,
                                       header.source_connection_id_included);
}
// Header-based overload: extracts the individual geometry fields from
// |header| and forwards to the field-wise GetPacketHeaderSize below.
size_t GetPacketHeaderSize(QuicTransportVersion version,
                           const QuicPacketHeader& header) {
  return GetPacketHeaderSize(
      version, GetIncludedDestinationConnectionIdLength(header),
      GetIncludedSourceConnectionIdLength(header), header.version_flag,
      header.nonce != nullptr, header.packet_number_length,
      header.retry_token_length_length, header.retry_token.length(),
      header.length_length);
}
// Computes the on-the-wire header size from individual fields. When
// include_version is set the long-header layout applies, otherwise the
// short-header layout.
size_t GetPacketHeaderSize(
    QuicTransportVersion version, uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool include_version,
    bool include_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length) {
  if (include_version) {
    // Long header: type byte, connection-ID length byte, both connection
    // IDs, packet number and version.
    size_t size = kPacketHeaderTypeSize + kConnectionIdLengthSize +
                  destination_connection_id_length +
                  source_connection_id_length + packet_number_length +
                  kQuicVersionSize;
    if (include_diversification_nonce) {
      size += kDiversificationNonceSize;
    }
    if (VersionHasLengthPrefixedConnectionIds(version)) {
      // Versions with length-prefixed connection IDs carry a second length
      // byte.
      size += kConnectionIdLengthSize;
    }
    // Versions without long-header lengths must not be given retry-token or
    // length fields.
    QUICHE_DCHECK(
        QuicVersionHasLongHeaderLengths(version) ||
        retry_token_length_length + retry_token_length + length_length == 0);
    if (QuicVersionHasLongHeaderLengths(version)) {
      size += retry_token_length_length + retry_token_length + length_length;
    }
    return size;
  }
  // Short header: type byte, destination connection ID and packet number.
  return kPacketHeaderTypeSize + destination_connection_id_length +
         packet_number_length;
}
// The encrypted payload starts immediately after the header, so both
// overloads simply delegate to GetPacketHeaderSize.
size_t GetStartOfEncryptedData(QuicTransportVersion version,
                               const QuicPacketHeader& header) {
  return GetPacketHeaderSize(version, header);
}
size_t GetStartOfEncryptedData(
    QuicTransportVersion version, uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool include_version,
    bool include_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length) {
  return GetPacketHeaderSize(
      version, destination_connection_id_length, source_connection_id_length,
      include_version, include_diversification_nonce, packet_number_length,
      retry_token_length_length, retry_token_length, length_length);
}
// Default-constructs a header: empty/absent connection IDs, all flags
// cleared, 4-byte packet number, unsupported version, no nonce or retry
// token. Copy/assign are member-wise defaults.
QuicPacketHeader::QuicPacketHeader()
    : destination_connection_id(EmptyQuicConnectionId()),
      destination_connection_id_included(CONNECTION_ID_PRESENT),
      source_connection_id(EmptyQuicConnectionId()),
      source_connection_id_included(CONNECTION_ID_ABSENT),
      reset_flag(false),
      version_flag(false),
      has_possible_stateless_reset_token(false),
      packet_number_length(PACKET_4BYTE_PACKET_NUMBER),
      type_byte(0),
      version(UnsupportedQuicVersion()),
      nonce(nullptr),
      form(GOOGLE_QUIC_PACKET),
      long_packet_type(INITIAL),
      possible_stateless_reset_token({}),
      retry_token_length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
      retry_token(absl::string_view()),
      length_length(quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0),
      remaining_packet_length(0) {}
QuicPacketHeader::QuicPacketHeader(const QuicPacketHeader& other) = default;
QuicPacketHeader::~QuicPacketHeader() {}
QuicPacketHeader& QuicPacketHeader::operator=(const QuicPacketHeader& other) =
    default;
// Constructors for the small packet structs: defaults use an empty
// connection ID / zeroed token; the explicit overloads set the given fields.
QuicPublicResetPacket::QuicPublicResetPacket()
    : connection_id(EmptyQuicConnectionId()), nonce_proof(0) {}
QuicPublicResetPacket::QuicPublicResetPacket(QuicConnectionId connection_id)
    : connection_id(connection_id), nonce_proof(0) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket()
    : connection_id(EmptyQuicConnectionId()) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
    QuicConnectionId connection_id)
    : connection_id(connection_id) {}
QuicVersionNegotiationPacket::QuicVersionNegotiationPacket(
    const QuicVersionNegotiationPacket& other) = default;
QuicVersionNegotiationPacket::~QuicVersionNegotiationPacket() {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket()
    : stateless_reset_token({}) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
    const QuicPacketHeader& header, StatelessResetToken token)
    : header(header), stateless_reset_token(token) {}
QuicIetfStatelessResetPacket::QuicIetfStatelessResetPacket(
    const QuicIetfStatelessResetPacket& other) = default;
QuicIetfStatelessResetPacket::~QuicIetfStatelessResetPacket() {}
// Debug formatter: prints every populated header field; long-header fields
// are emitted only when version_flag is set, and optional fields only when
// non-default.
std::ostream& operator<<(std::ostream& os, const QuicPacketHeader& header) {
  os << "{ destination_connection_id: " << header.destination_connection_id
     << " ("
     << (header.destination_connection_id_included == CONNECTION_ID_PRESENT
             ? "present"
             : "absent")
     << "), source_connection_id: " << header.source_connection_id << " ("
     << (header.source_connection_id_included == CONNECTION_ID_PRESENT
             ? "present"
             : "absent")
     << "), packet_number_length: "
     << static_cast<int>(header.packet_number_length)
     << ", reset_flag: " << header.reset_flag
     << ", version_flag: " << header.version_flag;
  if (header.version_flag) {
    os << ", version: " << ParsedQuicVersionToString(header.version);
    if (header.long_packet_type != INVALID_PACKET_TYPE) {
      os << ", long_packet_type: "
         << QuicUtils::QuicLongHeaderTypetoString(header.long_packet_type);
    }
    if (header.retry_token_length_length !=
        quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
      os << ", retry_token_length_length: "
         << static_cast<int>(header.retry_token_length_length);
    }
    if (header.retry_token.length() != 0) {
      os << ", retry_token_length: " << header.retry_token.length();
    }
    if (header.length_length != quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0) {
      os << ", length_length: " << static_cast<int>(header.length_length);
    }
    if (header.remaining_packet_length != 0) {
      os << ", remaining_packet_length: " << header.remaining_packet_length;
    }
  }
  if (header.nonce != nullptr) {
    // Nonce bytes are printed hex-encoded.
    os << ", diversification_nonce: "
       << absl::BytesToHexString(
              absl::string_view(header.nonce->data(), header.nonce->size()));
  }
  os << ", packet_number: " << header.packet_number << " }\n";
  return os;
}
// QuicData: non-owning by default; the destructor delete[]s the buffer only
// when ownership was transferred.
QuicData::QuicData(const char* buffer, size_t length)
    : buffer_(buffer), length_(length), owns_buffer_(false) {}
QuicData::QuicData(const char* buffer, size_t length, bool owns_buffer)
    : buffer_(buffer), length_(length), owns_buffer_(owns_buffer) {}
QuicData::QuicData(absl::string_view packet_data)
    : buffer_(packet_data.data()),
      length_(packet_data.length()),
      owns_buffer_(false) {}
QuicData::~QuicData() {
  if (owns_buffer_) {
    // const_cast is needed because the buffer was allocated as char[] but is
    // stored as const char*.
    delete[] const_cast<char*>(buffer_);
  }
}
// QuicPacket: stores the buffer (also writable via buffer_) plus the header
// geometry needed to slice it into associated data and plaintext.
QuicPacket::QuicPacket(
    char* buffer, size_t length, bool owns_buffer,
    uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool includes_version,
    bool includes_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    QuicByteCount retry_token_length,
    quiche::QuicheVariableLengthIntegerLength length_length)
    : QuicData(buffer, length, owns_buffer),
      buffer_(buffer),
      destination_connection_id_length_(destination_connection_id_length),
      source_connection_id_length_(source_connection_id_length),
      includes_version_(includes_version),
      includes_diversification_nonce_(includes_diversification_nonce),
      packet_number_length_(packet_number_length),
      retry_token_length_length_(retry_token_length_length),
      retry_token_length_(retry_token_length),
      length_length_(length_length) {}
// Header-based overload; the version parameter is intentionally unnamed and
// unused here — geometry comes entirely from |header|.
QuicPacket::QuicPacket(QuicTransportVersion , char* buffer,
                       size_t length, bool owns_buffer,
                       const QuicPacketHeader& header)
    : QuicPacket(buffer, length, owns_buffer,
                 GetIncludedDestinationConnectionIdLength(header),
                 GetIncludedSourceConnectionIdLength(header),
                 header.version_flag, header.nonce != nullptr,
                 header.packet_number_length, header.retry_token_length_length,
                 header.retry_token.length(), header.length_length) {}
// QuicEncryptedPacket constructors forward straight to QuicData.
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length)
    : QuicData(buffer, length) {}
QuicEncryptedPacket::QuicEncryptedPacket(const char* buffer, size_t length,
                                         bool owns_buffer)
    : QuicData(buffer, length, owns_buffer) {}
QuicEncryptedPacket::QuicEncryptedPacket(absl::string_view data)
    : QuicData(data) {}
// Deep copy: the clone owns a fresh buffer with the same bytes.
std::unique_ptr<QuicEncryptedPacket> QuicEncryptedPacket::Clone() const {
  char* buffer = new char[this->length()];
  std::copy(this->data(), this->data() + this->length(), buffer);
  return std::make_unique<QuicEncryptedPacket>(buffer, this->length(), true);
}
std::ostream& operator<<(std::ostream& os, const QuicEncryptedPacket& s) {
  os << s.length() << "-byte data";
  return os;
}
// Shorter QuicReceivedPacket constructors chain to the fullest overload,
// filling in defaults: no ownership, ttl 0 (marked valid), no captured
// headers, ECN_NOT_ECT.
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time)
    : QuicReceivedPacket(buffer, length, receipt_time,
                         false ) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time, bool owns_buffer)
    : QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, 0 ,
                         true ) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
                                       QuicTime receipt_time, bool owns_buffer,
                                       int ttl, bool ttl_valid)
    : quic::QuicReceivedPacket(buffer, length, receipt_time, owns_buffer, ttl,
                               ttl_valid, nullptr ,
                               0 ,
                               false , ECN_NOT_ECT) {}
QuicReceivedPacket::QuicReceivedPacket(const char* buffer, size_t length,
QuicTime receipt_time, bool owns_buffer,
int ttl, bool ttl_valid,
char* packet_heade | #include "quiche/quic/core/quic_packets.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
// Builds a header with distinct, recognizable connection IDs: destination
// TestConnectionId(1) marked present, source TestConnectionId(2) marked
// absent. Shared fixture for the tests below.
QuicPacketHeader CreateFakePacketHeader() {
  QuicPacketHeader header;
  header.destination_connection_id = TestConnectionId(1);
  header.destination_connection_id_included = CONNECTION_ID_PRESENT;
  header.source_connection_id = TestConnectionId(2);
  header.source_connection_id_included = CONNECTION_ID_ABSENT;
  return header;
}
class QuicPacketsTest : public QuicTest {};
// Each perspective helper should pick the destination ID for the endpoint
// the ID belongs to and the source ID for the other endpoint; the *Included*
// variants mirror the same selection for the presence flags.
TEST_F(QuicPacketsTest, GetServerConnectionIdAsRecipient) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(1),
            GetServerConnectionIdAsRecipient(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(2),
            GetServerConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(2),
            GetServerConnectionIdAsSender(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(1),
            GetServerConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetServerConnectionIdIncludedAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(CONNECTION_ID_ABSENT, GetServerConnectionIdIncludedAsSender(
                                      header, Perspective::IS_SERVER));
  EXPECT_EQ(CONNECTION_ID_PRESENT, GetServerConnectionIdIncludedAsSender(
                                       header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdIncludedAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(CONNECTION_ID_PRESENT, GetClientConnectionIdIncludedAsSender(
                                       header, Perspective::IS_SERVER));
  EXPECT_EQ(CONNECTION_ID_ABSENT, GetClientConnectionIdIncludedAsSender(
                                      header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsRecipient) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(2),
            GetClientConnectionIdAsRecipient(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(1),
            GetClientConnectionIdAsRecipient(header, Perspective::IS_CLIENT));
}
TEST_F(QuicPacketsTest, GetClientConnectionIdAsSender) {
  QuicPacketHeader header = CreateFakePacketHeader();
  EXPECT_EQ(TestConnectionId(1),
            GetClientConnectionIdAsSender(header, Perspective::IS_SERVER));
  EXPECT_EQ(TestConnectionId(2),
            GetClientConnectionIdAsSender(header, Perspective::IS_CLIENT));
}
// Copy construction must preserve equality with the source header.
TEST_F(QuicPacketsTest, CopyQuicPacketHeader) {
  QuicPacketHeader header;
  QuicPacketHeader header2 = CreateFakePacketHeader();
  EXPECT_NE(header, header2);
  QuicPacketHeader header3(header2);
  EXPECT_EQ(header2, header3);
}
// CopySerializedPacket with copy_buffer=true must duplicate the encrypted
// bytes and all frame lists; with copy_buffer=false the copy shares the
// original buffer pointer.
TEST_F(QuicPacketsTest, CopySerializedPacket) {
  std::string buffer(1000, 'a');
  quiche::SimpleBufferAllocator allocator;
  SerializedPacket packet(QuicPacketNumber(1), PACKET_1BYTE_PACKET_NUMBER,
                          buffer.data(), buffer.length(), false,
                          false);
  packet.retransmittable_frames.push_back(QuicFrame(QuicWindowUpdateFrame()));
  packet.retransmittable_frames.push_back(QuicFrame(QuicStreamFrame()));
  QuicAckFrame ack_frame(InitAckFrame(1));
  packet.nonretransmittable_frames.push_back(QuicFrame(&ack_frame));
  packet.nonretransmittable_frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  std::unique_ptr<SerializedPacket> copy = absl::WrapUnique<SerializedPacket>(
      CopySerializedPacket(packet, &allocator, true));
  EXPECT_EQ(quic::QuicPacketNumber(1), copy->packet_number);
  EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER, copy->packet_number_length);
  ASSERT_EQ(2u, copy->retransmittable_frames.size());
  EXPECT_EQ(WINDOW_UPDATE_FRAME, copy->retransmittable_frames[0].type);
  EXPECT_EQ(STREAM_FRAME, copy->retransmittable_frames[1].type);
  ASSERT_EQ(2u, copy->nonretransmittable_frames.size());
  EXPECT_EQ(ACK_FRAME, copy->nonretransmittable_frames[0].type);
  EXPECT_EQ(PADDING_FRAME, copy->nonretransmittable_frames[1].type);
  EXPECT_EQ(1000u, copy->encrypted_length);
  quiche::test::CompareCharArraysWithHexError(
      "encrypted_buffer", copy->encrypted_buffer, copy->encrypted_length,
      packet.encrypted_buffer, packet.encrypted_length);
  // Shallow copy: same buffer pointer, same length.
  std::unique_ptr<SerializedPacket> copy2 = absl::WrapUnique<SerializedPacket>(
      CopySerializedPacket(packet, &allocator, false));
  EXPECT_EQ(packet.encrypted_buffer, copy2->encrypted_buffer);
  EXPECT_EQ(1000u, copy2->encrypted_length);
}
// Clone() must carry the ECN codepoint over to the copy.
TEST_F(QuicPacketsTest, CloneReceivedPacket) {
  char header[4] = "bar";
  QuicReceivedPacket packet("foo", 3, QuicTime::Zero(), false, 0, true, header,
                            sizeof(header) - 1, false,
                            QuicEcnCodepoint::ECN_ECT1);
  std::unique_ptr<QuicReceivedPacket> copy = packet.Clone();
  EXPECT_EQ(packet.ecn_codepoint(), copy->ecn_codepoint());
}
}
}
} |
13 | #ifndef AROLLA_UTIL_BINARY_SEARCH_H_
#define AROLLA_UTIL_BINARY_SEARCH_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include "absl/base/attributes.h"
#include "absl/types/span.h"
namespace arolla {
// Returns the index of the first element not less than / strictly greater
// than |value| (std::lower_bound / std::upper_bound semantics) in a sorted
// array. Overloads for the supported element types.
size_t LowerBound(float value, absl::Span<const float> array);
size_t LowerBound(double value, absl::Span<const double> array);
size_t LowerBound(int32_t value, absl::Span<const int32_t> array);
size_t LowerBound(int64_t value, absl::Span<const int64_t> array);
size_t UpperBound(float value, absl::Span<const float> array);
size_t UpperBound(double value, absl::Span<const double> array);
size_t UpperBound(int32_t value, absl::Span<const int32_t> array);
size_t UpperBound(int64_t value, absl::Span<const int64_t> array);
// Lower bound via exponential (galloping) search; efficient when the result
// is near |begin|. Defined at the bottom of this header.
template <typename T, typename Iter>
Iter GallopingLowerBound(Iter begin, Iter end, const T& value);
}
namespace arolla::binary_search_details {
constexpr size_t kSupremacySizeThreshold = 1'000'000;
// Dispatch helpers; the definitions appear later in this header.
template <typename T>
size_t LowerBound(T value, absl::Span<const T> array);
template <typename T>
size_t UpperBound(T value, absl::Span<const T> array);
// Fast path for arrays of at most two elements: returns the index of the
// first element satisfying |predicate| (or the array size when none does).
// Returns std::nullopt for larger arrays, signalling the caller to binary
// search instead.
template <typename T, typename Predicate>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE std::optional<size_t> SmallLinearSearch(
    absl::Span<const T> array, Predicate predicate) {
  if (array.size() > 2) {
    return std::nullopt;
  }
  for (size_t i = 0; i < array.size(); ++i) {
    if (predicate(array[i])) {
      return i;
    }
  }
  return array.size();
}
// Out-of-line binary-search implementations (defined in the .cc file) used
// once the small-array fast path has been ruled out.
size_t UpperBoundImpl(float value, absl::Span<const float> array);
size_t UpperBoundImpl(double value, absl::Span<const double> array);
size_t UpperBoundImpl(int32_t value, absl::Span<const int32_t> array);
size_t UpperBoundImpl(int64_t value, absl::Span<const int64_t> array);
size_t LowerBoundImpl(float value, absl::Span<const float> array);
size_t LowerBoundImpl(double value, absl::Span<const double> array);
size_t LowerBoundImpl(int32_t value, absl::Span<const int32_t> array);
size_t LowerBoundImpl(int64_t value, absl::Span<const int64_t> array);
// Lower-bound dispatch: arrays of size <= 2 are resolved inline by a linear
// scan; everything else goes to the out-of-line binary search.
template <typename T>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t
LowerBound(T value, absl::Span<const T> array) {
  const auto small_result =
      SmallLinearSearch(array, [value](T arg) { return !(arg < value); });
  if (small_result.has_value()) {
    return *small_result;
  }
  return LowerBoundImpl(value, array);
}
// Upper-bound dispatch: arrays of size <= 2 are resolved inline by a linear
// scan; everything else goes to the out-of-line binary search.
template <typename T>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t
UpperBound(T value, absl::Span<const T> array) {
  const auto small_result =
      SmallLinearSearch(array, [value](T arg) { return value < arg; });
  if (small_result.has_value()) {
    return *small_result;
  }
  return UpperBoundImpl(value, array);
}
}
namespace arolla {
// Public entry points: thin wrappers forwarding each supported element type
// to the dispatchers in binary_search_details.
inline size_t LowerBound(float value, absl::Span<const float> array) {
  return binary_search_details::LowerBound<float>(value, array);
}
inline size_t LowerBound(double value, absl::Span<const double> array) {
  return binary_search_details::LowerBound<double>(value, array);
}
inline size_t LowerBound(int32_t value, absl::Span<const int32_t> array) {
  return binary_search_details::LowerBound<int32_t>(value, array);
}
inline size_t LowerBound(int64_t value, absl::Span<const int64_t> array) {
  return binary_search_details::LowerBound<int64_t>(value, array);
}
inline size_t UpperBound(float value, absl::Span<const float> array) {
  return binary_search_details::UpperBound<float>(value, array);
}
inline size_t UpperBound(double value, absl::Span<const double> array) {
  return binary_search_details::UpperBound<double>(value, array);
}
inline size_t UpperBound(int32_t value, absl::Span<const int32_t> array) {
  return binary_search_details::UpperBound<int32_t>(value, array);
}
inline size_t UpperBound(int64_t value, absl::Span<const int64_t> array) {
  return binary_search_details::UpperBound<int64_t>(value, array);
}
// Exponential (galloping) lower bound over a sorted range: probe at doubling
// strides until an element >= |value| is bracketed, then binary-refine
// within the last stride. Equivalent to std::lower_bound, but cheap when the
// answer lies near |begin|.
template <typename T, typename Iter>
Iter GallopingLowerBound(Iter begin, Iter end, const T& value) {
  const size_t size = end - begin;
  if (begin >= end || !(*begin < value)) {
    // Empty range, or the very first element already satisfies the bound.
    return std::min<Iter>(begin, end);
  }
  // Invariant throughout: begin[lo] < value.
  size_t lo = 0;
  size_t step = 1;
  while (lo + step < size && begin[lo + step] < value) {
    lo += step;
    step <<= 1;
  }
  // Binary refinement over the final (power-of-two) stride.
  while (step > 1) {
    step >>= 1;
    if (lo + step < size && begin[lo + step] < value) {
      lo += step;
    }
  }
  // begin[lo] < value and (lo + 1 == size or begin[lo + 1] >= value).
  return begin + lo + 1;
}
}
#endif
#include "arolla/util/binary_search.h"
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include "absl/types/span.h"
#include "arolla/util/bits.h"
#include "arolla/util/switch_index.h"
namespace arolla::binary_search_details {
namespace {
// Branchless binary search over a window of exactly kArraySize elements,
// where kArraySize must be of the form 2^k - 1. Returns the smallest index
// whose element satisfies |predicate| (kArraySize when none does);
// |predicate| must be monotone (false... then true...).
template <size_t kArraySize, typename T, class Predicate>
size_t FastBinarySearchT(const T* const array, Predicate predicate) {
  static_assert((kArraySize & (kArraySize + 1)) == 0);
  size_t result = 0;
  size_t step = kArraySize;
  while (step > 0) {
    step >>= 1;
    if (!predicate(array[result + step])) {
      result += step + 1;
    }
  }
  return result;
}
// Binary search over an arbitrary-size array: picks the largest window of
// size 2^k - 1 that fits, uses one probe at index size-1 to choose between
// the prefix starting at 0 and the suffix ending at array.end(), then runs
// the fixed-size FastBinarySearchT on that window. |predicate| must be
// monotone; returns the smallest satisfying index (array.size() if none).
template <typename T, typename Predicate>
size_t BinarySearchT(absl::Span<const T> array, Predicate predicate) {
  assert(!array.empty());
  const int log2_size = BitScanReverse(array.size());
  return switch_index<8 * sizeof(size_t)>(
      log2_size, [array, predicate](auto constexpr_log2_size) {
        constexpr size_t size =
            (1ULL << static_cast<int>(constexpr_log2_size)) - 1;
        size_t offset = 0;
// NOTE(review): the probe below looks in-bounds (size <= array.size() - 1
// since 2^log2_size <= array.size()); the pragma presumably silences a GCC
// -Warray-bounds false positive — confirm before removing.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
        offset = (!predicate(array[size]) ? array.size() - size : offset);
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
        return offset +
               FastBinarySearchT<size>(array.begin() + offset, predicate);
      });
}
}
// Lower-bound backends: index of the first element not less than |value|.
// Floating-point overloads use !(arg < value) so NaN elements compare as
// "not less" the same way std::lower_bound's default comparator would.
size_t LowerBoundImpl(float value, absl::Span<const float> array) {
  return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(double value, absl::Span<const double> array) {
  return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(int32_t value, absl::Span<const int32_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
size_t LowerBoundImpl(int64_t value, absl::Span<const int64_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
// Upper-bound backends: index of the first element greater than |value|.
// A NaN search value is handled up front (no element is "greater" than NaN,
// so the whole array size is returned).
size_t UpperBoundImpl(float value, absl::Span<const float> array) {
  if (std::isnan(value)) {
    return array.size();
  }
  return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(double value, absl::Span<const double> array) {
  if (std::isnan(value)) {
    return array.size();
  }
  return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(int32_t value, absl::Span<const int32_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
size_t UpperBoundImpl(int64_t value, absl::Span<const int64_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
} | #include "arolla/util/binary_search.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "absl/types/span.h"
namespace arolla {
namespace {
// Reference implementations built on std::lower_bound, used as the oracle
// the arolla implementations are compared against.
size_t StdLowerBound(float value, absl::Span<const float> array) {
  return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(double value, absl::Span<const double> array) {
  return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(int32_t value, absl::Span<const int32_t> array) {
  return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
size_t StdLowerBound(int64_t value, absl::Span<const int64_t> array) {
  return std::lower_bound(array.begin(), array.end(), value) - array.begin();
}
// Adapter giving GallopingLowerBound the same index-returning signature.
size_t RlGallopingLowerBound(float value, absl::Span<const float> array) {
  return GallopingLowerBound(array.begin(), array.end(), value) - array.begin();
}
// Cross-checks LowerBound and the galloping variant against std::lower_bound
// on odd-valued arrays of several sizes, probing hits, misses and
// out-of-range values on both sides.
TEST(Algorithms, LowerBound_General) {
  for (int n : {0, 1, 5, 7, 100, 1000}) {
    std::vector<float> thresholds(n);
    for (int i = 0; i < n; ++i) {
      thresholds[i] = 2 * i + 1;
    }
    for (int i = 0; i < static_cast<int>(2 * thresholds.size()); ++i) {
      size_t expected = StdLowerBound(i, thresholds);
      ASSERT_EQ(LowerBound(i, thresholds), expected);
      ASSERT_EQ(RlGallopingLowerBound(i, thresholds), expected);
    }
    ASSERT_EQ(LowerBound(-10 * n, thresholds),
              StdLowerBound(-10 * n, thresholds));
    ASSERT_EQ(LowerBound(10 * n, thresholds),
              StdLowerBound(10 * n, thresholds));
  }
}
// All-equal arrays: the lower bound must land at the start for values <= 0
// and past the end for larger values, for both implementations.
TEST(Algorithms, LowerBound_Duplicates) {
  for (int n : {2, 140}) {
    std::vector<float> thresholds(n, 0.);
    ASSERT_EQ(LowerBound(-1, thresholds), 0);
    ASSERT_EQ(LowerBound(0., thresholds), 0);
    ASSERT_EQ(LowerBound(1., thresholds), n);
    ASSERT_EQ(RlGallopingLowerBound(-1, thresholds), 0);
    ASSERT_EQ(RlGallopingLowerBound(0., thresholds), 0);
    ASSERT_EQ(RlGallopingLowerBound(1., thresholds), n);
  }
}
// Arrays bracketed by +/-infinity must agree with std::lower_bound at the
// infinite endpoints.
TEST(Algorithms, LowerBound_Infs) {
  const auto kInf = std::numeric_limits<float>::infinity();
  for (int n : {2, 140}) {
    // Build [-inf, 1, 2, ..., n-2, +inf]. (Previously the vector was
    // pre-sized with `thresholds(n)`, so the push_backs appended after n
    // zeros and the test silently ran on a 2n-element array; default-
    // construct as the LowerBound_Nan test below does.)
    std::vector<float> thresholds;
    thresholds.reserve(n);
    for (int i = 0; i < n; ++i) {
      thresholds.push_back(i);
    }
    thresholds.front() = -kInf;
    thresholds.back() = kInf;
    ASSERT_EQ(LowerBound(-kInf, thresholds), StdLowerBound(-kInf, thresholds));
    ASSERT_EQ(LowerBound(kInf, thresholds), StdLowerBound(kInf, thresholds));
    ASSERT_EQ(RlGallopingLowerBound(kInf, thresholds),
              StdLowerBound(kInf, thresholds));
  }
}
// Verifies that LowerBound / GallopingLowerBound agree with std::lower_bound
// when the probe is NaN, on an array bracketed by -inf and +inf.
TEST(Algorithms, LowerBound_Nan) {
  const auto kNan = std::numeric_limits<float>::quiet_NaN();
  const auto kInf = std::numeric_limits<float>::infinity();
  for (int n : {2, 140}) {
    // Array is {-inf, 1, 2, ..., n-2, +inf}: built empty, then n push_backs.
    std::vector<float> thresholds;
    for (int i = 0; i < n; ++i) {
      thresholds.push_back(i);
    }
    thresholds.front() = -kInf;
    thresholds.back() = kInf;
    ASSERT_EQ(LowerBound(kNan, thresholds), StdLowerBound(kNan, thresholds));
    ASSERT_EQ(RlGallopingLowerBound(kNan, thresholds),
              StdLowerBound(kNan, thresholds));
  }
}
// Reference implementation: index of the first element in `array` that is
// greater than `value`, computed with std::upper_bound.
size_t StdUpperBound(float value, absl::Span<const float> array) {
  const float* base = array.data();
  const float* pos = std::upper_bound(base, base + array.size(), value);
  return static_cast<size_t>(pos - base);
}
// Reference implementation: index of the first element in `array` that is
// greater than `value`, computed with std::upper_bound.
size_t StdUpperBound(double value, absl::Span<const double> array) {
  const double* base = array.data();
  const double* pos = std::upper_bound(base, base + array.size(), value);
  return static_cast<size_t>(pos - base);
}
// Reference implementation: index of the first element in `array` that is
// greater than `value`, computed with std::upper_bound.
size_t StdUpperBound(int32_t value, absl::Span<const int32_t> array) {
  const int32_t* base = array.data();
  const int32_t* pos = std::upper_bound(base, base + array.size(), value);
  return static_cast<size_t>(pos - base);
}
// Reference implementation: index of the first element in `array` that is
// greater than `value`, computed with std::upper_bound.
size_t StdUpperBound(int64_t value, absl::Span<const int64_t> array) {
  const int64_t* base = array.data();
  const int64_t* pos = std::upper_bound(base, base + array.size(), value);
  return static_cast<size_t>(pos - base);
}
// Verifies UpperBound against std::upper_bound on odd-valued thresholds
// {1, 3, 5, ...} for several sizes, probing every integer in range plus
// values far outside the array on either side.
TEST(Algorithms, UpperBound_General) {
  for (int n : {0, 1, 5, 7, 100, 1000}) {
    std::vector<float> thresholds(n);
    for (int i = 0; i < n; ++i) {
      thresholds[i] = 2 * i + 1;
    }
    // Probing [0, 2n) exercises both exact matches (odd i) and gaps (even i).
    for (int i = 0; i < static_cast<int>(2 * thresholds.size()); ++i) {
      ASSERT_EQ(UpperBound(i, thresholds), StdUpperBound(i, thresholds));
    }
    // Probes far below and far above every threshold.
    ASSERT_EQ(UpperBound(-10 * n, thresholds),
              StdUpperBound(-10 * n, thresholds));
    ASSERT_EQ(UpperBound(10 * n, thresholds),
              StdUpperBound(10 * n, thresholds));
  }
}
// All-equal arrays: UpperBound must agree with std::upper_bound both for a
// probe below the duplicated value and for the duplicated value itself.
TEST(Algorithms, UpperBound_Duplicates) {
  for (int n : {2, 140}) {
    std::vector<float> thresholds(n, 0.);
    ASSERT_EQ(UpperBound(-1, thresholds), StdUpperBound(-1., thresholds));
    ASSERT_EQ(UpperBound(0., thresholds), StdUpperBound(0., thresholds));
  }
}
// Checks UpperBound agreement with std::upper_bound when the array is
// bracketed by -inf and +inf and the probe is +/-inf.
TEST(Algorithms, UpperBound_Infs) {
  const auto kInf = std::numeric_limits<float>::infinity();
  for (int n : {2, 140}) {
    // Build exactly n sorted values {-inf, 1, 2, ..., n-2, +inf}. The previous
    // version pre-sized the vector to n and then push_back'ed n more values,
    // so the array under test silently held n spurious leading zeros and had
    // size 2n (compare the correctly built UpperBound_Nan test).
    std::vector<float> thresholds;
    for (int i = 0; i < n; ++i) {
      thresholds.push_back(i);
    }
    thresholds.front() = -kInf;
    thresholds.back() = kInf;
    ASSERT_EQ(UpperBound(-kInf, thresholds), StdUpperBound(-kInf, thresholds));
    ASSERT_EQ(UpperBound(kInf, thresholds), StdUpperBound(kInf, thresholds));
  }
}
// Verifies that UpperBound agrees with std::upper_bound when the probe is
// NaN, on an array bracketed by -inf and +inf.
TEST(Algorithms, UpperBound_Nan) {
  const auto kNan = std::numeric_limits<float>::quiet_NaN();
  const auto kInf = std::numeric_limits<float>::infinity();
  for (int n : {2, 140}) {
    // Array is {-inf, 1, 2, ..., n-2, +inf}: built empty, then n push_backs.
    std::vector<float> thresholds;
    for (int i = 0; i < n; ++i) {
      thresholds.push_back(i);
    }
    thresholds.front() = -kInf;
    thresholds.back() = kInf;
    ASSERT_EQ(UpperBound(kNan, thresholds), StdUpperBound(kNan, thresholds));
  }
}
// Produces `size` pseudo-random values of type T from a generator seeded with
// `seed` (deterministic for a fixed seed/size). Integral T draws uniformly
// from [0, 2^30]; floating-point T draws uniformly from [0, 1).
template <typename T>
std::vector<T> RandomVector(size_t seed, size_t size) {
  std::mt19937 rng(seed);
  std::vector<T> values(size);
  if constexpr (std::is_integral_v<T>) {
    std::uniform_int_distribution<T> dist(0, 1 << 30);
    std::generate(values.begin(), values.end(), [&] { return dist(rng); });
  } else {
    std::uniform_real_distribution<T> dist;
    std::generate(values.begin(), values.end(), [&] { return dist(rng); });
  }
  return values;
}
// Returns a copy of `vec` sorted in ascending order.
template <typename T>
std::vector<T> Sorted(std::vector<T> vec) {
  std::sort(std::begin(vec), std::end(vec));
  return vec;
}
// Signature shared by the algorithm under test and its reference version:
// (probe value, sorted array) -> index.
template <typename T>
using AlgoFn = std::function<size_t(T, const std::vector<T>&)>;
// Runs `algoFn` and `referenceAlgoFn` on a random sorted array of `size`
// elements with 2*size random probes, reporting the first disagreement as a
// test failure. The seed is derived from `size`, so runs are reproducible.
template <typename T>
void BinarySearchStressTest(size_t size, AlgoFn<T> algoFn,
                            AlgoFn<T> referenceAlgoFn) {
  const auto seed = 34 + size;
  const auto array = Sorted(RandomVector<T>(seed, size));
  for (auto value : RandomVector<T>(seed, 2 * size)) {
    const auto actual_value = algoFn(value, array);
    const auto expected_value = referenceAlgoFn(value, array);
    if (actual_value != expected_value) {
      // Stop at the first mismatch to keep failure output small.
      ADD_FAILURE() << "Actual value: " << actual_value << '\n'
                    << "Expected value: " << expected_value << '\n'
                    << "size: " << size;
      return;
    }
  }
}
// Randomized comparison of LowerBound (and the galloping variant for float)
// against std::lower_bound across all supported element types and several
// array sizes.
TEST(Algorithms, LowerBound_Stress) {
  for (int size : {10, 100, 1000, 100000}) {
    BinarySearchStressTest<float>(
        size,
        [](float value, absl::Span<const float> array) {
          return LowerBound(value, array);
        },
        [](float value, absl::Span<const float> array) {
          return StdLowerBound(value, array);
        });
    BinarySearchStressTest<float>(
        size,
        [](float value, absl::Span<const float> array) {
          return RlGallopingLowerBound(value, array);
        },
        [](float value, absl::Span<const float> array) {
          return StdLowerBound(value, array);
        });
    BinarySearchStressTest<double>(
        size,
        [](double value, absl::Span<const double> array) {
          return LowerBound(value, array);
        },
        [](double value, absl::Span<const double> array) {
          return StdLowerBound(value, array);
        });
    BinarySearchStressTest<int32_t>(
        size,
        [](int32_t value, absl::Span<const int32_t> array) {
          return LowerBound(value, array);
        },
        [](int32_t value, absl::Span<const int32_t> array) {
          return StdLowerBound(value, array);
        });
    BinarySearchStressTest<int64_t>(
        size,
        [](int64_t value, absl::Span<const int64_t> array) {
          return LowerBound(value, array);
        },
        [](int64_t value, absl::Span<const int64_t> array) {
          return StdLowerBound(value, array);
        });
  }
}
// Randomized comparison of UpperBound against std::upper_bound across all
// supported element types and several array sizes.
TEST(Algorithms, UpperBound_Stress) {
  for (int size : {10, 100, 1000, 100000}) {
    BinarySearchStressTest<float>(
        size,
        [](float value, absl::Span<const float> array) {
          return UpperBound(value, array);
        },
        [](float value, absl::Span<const float> array) {
          return StdUpperBound(value, array);
        });
    BinarySearchStressTest<double>(
        size,
        [](double value, absl::Span<const double> array) {
          return UpperBound(value, array);
        },
        [](double value, absl::Span<const double> array) {
          return StdUpperBound(value, array);
        });
    BinarySearchStressTest<int32_t>(
        size,
        [](int32_t value, absl::Span<const int32_t> array) {
          return UpperBound(value, array);
        },
        [](int32_t value, absl::Span<const int32_t> array) {
          return StdUpperBound(value, array);
        });
    BinarySearchStressTest<int64_t>(
        size,
        [](int64_t value, absl::Span<const int64_t> array) {
          return UpperBound(value, array);
        },
        [](int64_t value, absl::Span<const int64_t> array) {
          return StdUpperBound(value, array);
        });
  }
}
}
} |
14 | #ifndef AROLLA_EXPR_DERIVED_CAST_OPERATOR_H_
#define AROLLA_EXPR_DERIVED_CAST_OPERATOR_H_
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/qtype/qtype.h"
namespace arolla::expr {
// Operator that casts a value of a derived qtype to its base qtype.
// Accepts exactly the configured derived qtype as input (see the static
// GetOutputQType overload in the .cc file).
class DerivedQTypeUpcastOperator final : public BuiltinExprOperatorTag,
                                         public BasicExprOperator {
 public:
  // Returns the base qtype when value_qtype matches derived_qtype; otherwise
  // an InvalidArgumentError.
  static absl::StatusOr<QTypePtr> GetOutputQType(QTypePtr derived_qtype,
                                                 QTypePtr value_qtype);
  explicit DerivedQTypeUpcastOperator(QTypePtr derived_qtype);
  absl::StatusOr<QTypePtr> GetOutputQType(
      absl::Span<const QTypePtr> input_qtypes) const final;
  // The derived qtype this operator upcasts from.
  QTypePtr derived_qtype() const;
 private:
  QTypePtr derived_qtype_;
};
// Operator that casts a value of a base qtype to the configured derived
// qtype. Accepts exactly the derived qtype's base qtype as input (see the
// static GetOutputQType overload in the .cc file).
class DerivedQTypeDowncastOperator final : public BuiltinExprOperatorTag,
                                           public BasicExprOperator {
 public:
  // Returns derived_qtype when value_qtype matches its base qtype; otherwise
  // an InvalidArgumentError.
  static absl::StatusOr<QTypePtr> GetOutputQType(QTypePtr derived_qtype,
                                                 QTypePtr value_qtype);
  explicit DerivedQTypeDowncastOperator(QTypePtr derived_qtype);
  absl::StatusOr<QTypePtr> GetOutputQType(
      absl::Span<const QTypePtr> input_qtypes) const final;
  // The derived qtype this operator downcasts to.
  QTypePtr derived_qtype() const;
 private:
  QTypePtr derived_qtype_;
};
}
#endif
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
// Returns the base (decayed) qtype when `value_qtype` is exactly the expected
// derived qtype; otherwise an InvalidArgumentError naming both types.
absl::StatusOr<QTypePtr> DerivedQTypeUpcastOperator::GetOutputQType(
    QTypePtr derived_qtype, QTypePtr value_qtype) {
  if (value_qtype != derived_qtype) {
    return absl::InvalidArgumentError(
        absl::StrFormat("expected %s, got value: %s", derived_qtype->name(),
                        value_qtype->name()));
  }
  return DecayDerivedQType(derived_qtype);
}
// Constructs the operator with a name and fingerprint derived from the
// target qtype, so distinct derived qtypes yield distinct operators.
DerivedQTypeUpcastOperator::DerivedQTypeUpcastOperator(QTypePtr derived_qtype)
    : BasicExprOperator(
          absl::StrFormat("derived_qtype.upcast[%s]", derived_qtype->name()),
          ExprOperatorSignature{{"value"}},
          "Casts a derived value to the base type.",
          FingerprintHasher("arolla::expr::DerivedQTypeUpcastOperator")
              .Combine(derived_qtype)
              .Finish()),
      derived_qtype_(derived_qtype) {}
// Delegates to the static overload using the single "value" input's qtype.
absl::StatusOr<QTypePtr> DerivedQTypeUpcastOperator::GetOutputQType(
    absl::Span<const QTypePtr> input_qtypes) const {
  return DerivedQTypeUpcastOperator::GetOutputQType(derived_qtype_,
                                                    input_qtypes[0]);
}
// Accessor for the derived qtype this operator upcasts from.
QTypePtr DerivedQTypeUpcastOperator::derived_qtype() const {
  return derived_qtype_;
}
// Returns `derived_qtype` when `value_qtype` is exactly its base qtype;
// otherwise an InvalidArgumentError naming both types.
absl::StatusOr<QTypePtr> DerivedQTypeDowncastOperator::GetOutputQType(
    QTypePtr derived_qtype, QTypePtr value_qtype) {
  const auto* base_qtype = DecayDerivedQType(derived_qtype);
  if (value_qtype != base_qtype) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "expected %s, got value: %s", base_qtype->name(), value_qtype->name()));
  }
  return derived_qtype;
}
// Constructs the operator with a name and fingerprint derived from the
// target qtype, so distinct derived qtypes yield distinct operators.
DerivedQTypeDowncastOperator::DerivedQTypeDowncastOperator(
    QTypePtr derived_qtype)
    : BasicExprOperator(
          absl::StrFormat("derived_qtype.downcast[%s]", derived_qtype->name()),
          ExprOperatorSignature{{"value"}},
          "Casts a base qtype value to the derived qtype.",
          FingerprintHasher("arolla::expr::DerivedQTypeDowncastOperator")
              .Combine(derived_qtype)
              .Finish()),
      derived_qtype_(derived_qtype) {}
// Delegates to the static overload using the single "value" input's qtype.
absl::StatusOr<QTypePtr> DerivedQTypeDowncastOperator::GetOutputQType(
    absl::Span<const QTypePtr> input_qtypes) const {
  return DerivedQTypeDowncastOperator::GetOutputQType(derived_qtype_,
                                                      input_qtypes[0]);
}
// Accessor for the derived qtype this operator downcasts to.
QTypePtr DerivedQTypeDowncastOperator::derived_qtype() const {
  return derived_qtype_;
}
} | #include "arolla/expr/derived_qtype_cast_operator.h"
#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::InvokeExprOperator;
using ::arolla::testing::ReprTokenEq;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
// Test-only derived qtype: a TIME value stored as float, whose repr appends
// an "s" (seconds) suffix to the base float repr.
struct TimeQType final : BasicDerivedQType {
  TimeQType()
      : BasicDerivedQType(ConstructorArgs{
            .name = "TIME",
            .base_qtype = GetQType<float>(),
        }) {}
  ReprToken UnsafeReprToken(const void* source) const override {
    auto result = GetBaseQType()->UnsafeReprToken(source);
    result.str += "s";
    return result;
  }
  // Singleton accessor; Indestructible avoids destruction-order issues.
  static QTypePtr get() {
    static const Indestructible<TimeQType> result;
    return result.get();
  }
};
// Test-only derived qtype: a DISTANCE value stored as float, whose repr
// appends an "m" (meters) suffix to the base float repr.
struct DistanceQType final : BasicDerivedQType {
  DistanceQType()
      : BasicDerivedQType(ConstructorArgs{
            .name = "DISTANCE",
            .base_qtype = GetQType<float>(),
        }) {}
  ReprToken UnsafeReprToken(const void* source) const override {
    auto result = GetBaseQType()->UnsafeReprToken(source);
    result.str += "m";
    return result;
  }
  // Singleton accessor; Indestructible avoids destruction-order issues.
  static QTypePtr get() {
    static const Indestructible<DistanceQType> result;
    return result.get();
  }
};
// Fixture that initializes the Arolla runtime before each test.
class DerivedQTypeCastOperatorTests : public ::testing::Test {
  void SetUp() override { ASSERT_OK(InitArolla()); }
};
// Upcasting a DISTANCE value yields the base FLOAT32 value (repr loses the
// "m" suffix).
TEST_F(DerivedQTypeCastOperatorTests, UpcastDistance_WithDistanceInput) {
  ExprOperatorPtr upcast_distance =
      std::make_shared<DerivedQTypeUpcastOperator>(DistanceQType::get());
  ASSERT_OK_AND_ASSIGN(
      auto d, TypedValue::FromValueWithQType(6.28f, DistanceQType::get()));
  ASSERT_OK_AND_ASSIGN(auto f32,
                       InvokeExprOperator<TypedValue>(upcast_distance, d));
  EXPECT_EQ(f32.GetType(), GetQType<float>());
  EXPECT_THAT(f32.GenReprToken(),
              ReprTokenEq("6.28", ReprToken::kSafeForNegation));
}
// Upcasting a plain FLOAT32 through the DISTANCE upcaster is rejected.
TEST_F(DerivedQTypeCastOperatorTests, UpcastDistance_WithFloat32Input) {
  ExprOperatorPtr upcast_distance =
      std::make_shared<DerivedQTypeUpcastOperator>(DistanceQType::get());
  EXPECT_THAT(InvokeExprOperator<TypedValue>(upcast_distance, 6.28f),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected DISTANCE, got value: FLOAT32")));
}
// An upcaster configured for FLOAT32 rejects a DISTANCE input.
TEST_F(DerivedQTypeCastOperatorTests, UpcastFloat32_WithDistanceInput) {
  ExprOperatorPtr upcast_float32 =
      std::make_shared<DerivedQTypeUpcastOperator>(GetQType<float>());
  ASSERT_OK_AND_ASSIGN(
      auto d, TypedValue::FromValueWithQType(6.28f, DistanceQType::get()));
  EXPECT_THAT(InvokeExprOperator<TypedValue>(upcast_float32, d),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected FLOAT32, got value: DISTANCE")));
}
// Upcasting FLOAT32 through a FLOAT32-configured upcaster is the identity.
TEST_F(DerivedQTypeCastOperatorTests, UpcastFloat32_WithFloat32Input) {
  ExprOperatorPtr upcast_float32 =
      std::make_shared<DerivedQTypeUpcastOperator>(GetQType<float>());
  ASSERT_OK_AND_ASSIGN(auto f32,
                       InvokeExprOperator<TypedValue>(upcast_float32, 6.28f));
  EXPECT_EQ(f32.GetType(), GetQType<float>());
  EXPECT_THAT(f32.GenReprToken(),
              ReprTokenEq("6.28", ReprToken::kSafeForNegation));
}
// Downcasting an already-DISTANCE value is rejected: the downcaster expects
// the base FLOAT32 as input.
TEST_F(DerivedQTypeCastOperatorTests, DowncastDistance_WithDistanceInput) {
  ExprOperatorPtr downcast_distance =
      std::make_shared<DerivedQTypeDowncastOperator>(DistanceQType::get());
  ASSERT_OK_AND_ASSIGN(
      auto d, TypedValue::FromValueWithQType(6.28f, DistanceQType::get()));
  EXPECT_THAT(InvokeExprOperator<TypedValue>(downcast_distance, d),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected FLOAT32, got value: DISTANCE")));
}
// Downcasting a FLOAT32 yields a DISTANCE value (repr gains the "m" suffix).
TEST_F(DerivedQTypeCastOperatorTests, DowncastDistance_WithFloat32Input) {
  ExprOperatorPtr downcast_distance =
      std::make_shared<DerivedQTypeDowncastOperator>(DistanceQType::get());
  ASSERT_OK_AND_ASSIGN(
      auto d, InvokeExprOperator<TypedValue>(downcast_distance, 6.28f));
  EXPECT_EQ(d.GetType(), DistanceQType::get());
  EXPECT_THAT(d.GenReprToken(),
              ReprTokenEq("6.28m", ReprToken::kSafeForNegation));
}
// A downcaster configured for FLOAT32 rejects a DISTANCE input.
TEST_F(DerivedQTypeCastOperatorTests, DowncastFloat32_WithDistanceInput) {
  ExprOperatorPtr downcast_float32 =
      std::make_shared<DerivedQTypeDowncastOperator>(GetQType<float>());
  ASSERT_OK_AND_ASSIGN(
      auto d, TypedValue::FromValueWithQType(6.28f, DistanceQType::get()));
  EXPECT_THAT(InvokeExprOperator<TypedValue>(downcast_float32, d),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected FLOAT32, got value: DISTANCE")));
}
// Downcasting FLOAT32 through a FLOAT32-configured downcaster is the identity.
TEST_F(DerivedQTypeCastOperatorTests, DowncastFloat32_WithFloat32Input) {
  ExprOperatorPtr downcast_float32 =
      std::make_shared<DerivedQTypeDowncastOperator>(GetQType<float>());
  ASSERT_OK_AND_ASSIGN(auto f32,
                       InvokeExprOperator<TypedValue>(downcast_float32, 6.28f));
  EXPECT_EQ(f32.GetType(), GetQType<float>());
  EXPECT_THAT(f32.GenReprToken(),
              ReprTokenEq("6.28", ReprToken::kSafeForNegation));
}
}
} |
15 | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MESSAGE_WRAPPERS_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_MESSAGE_WRAPPERS_H_
#include "absl/status/status.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
// Read-only abstract view of a RunStep request, decoupling callers from
// whether the data lives in a RunStepRequest proto or in-memory tensors.
class RunStepRequestWrapper {
 public:
  virtual ~RunStepRequestWrapper() {}
  virtual const string& session_handle() const = 0;
  virtual const string& partial_run_handle() const = 0;
  // Feeds: named input tensors, addressable by index.
  virtual size_t num_feeds() const = 0;
  virtual const string& feed_name(size_t i) const = 0;
  virtual Status FeedValue(size_t i, Tensor* out_tensor) const = 0;
  virtual Status FeedValue(size_t i, TensorProto* out_tensor) const = 0;
  // Fetches: names of tensors the caller wants returned.
  virtual size_t num_fetches() const = 0;
  virtual const string& fetch_name(size_t i) const = 0;
  // Targets: names of nodes to run without returning a value.
  virtual size_t num_targets() const = 0;
  virtual const string& target_name(size_t i) const = 0;
  virtual const RunOptions& options() const = 0;
  virtual bool store_errors_in_response_body() const = 0;
  virtual int64_t request_id() const = 0;
  virtual string DebugString() const = 0;
  // Proto form of the request; may be built lazily by implementations.
  virtual const RunStepRequest& ToProto() const = 0;
};
// Mutable extension of RunStepRequestWrapper used when building a request.
class MutableRunStepRequestWrapper : public RunStepRequestWrapper {
 public:
  virtual void set_session_handle(const string& handle) = 0;
  virtual void set_partial_run_handle(const string& handle) = 0;
  virtual void add_feed(const string& name, const Tensor& value) = 0;
  virtual void add_fetch(const string& name) = 0;
  virtual void add_target(const string& name) = 0;
  virtual RunOptions* mutable_options() = 0;
  virtual void set_store_errors_in_response_body(bool store_errors) = 0;
};
// RunStep request stored as in-process data structures (strings and Tensor
// objects) rather than a proto; suited to in-process master/worker paths.
// ToProto() materializes and caches a proto copy on demand.
class InMemoryRunStepRequest : public MutableRunStepRequestWrapper {
 public:
  const string& session_handle() const override;
  const string& partial_run_handle() const override;
  size_t num_feeds() const override;
  const string& feed_name(size_t i) const override;
  Status FeedValue(size_t i, Tensor* out_tensor) const override;
  Status FeedValue(size_t i, TensorProto* out_tensor) const override;
  size_t num_fetches() const override;
  const string& fetch_name(size_t i) const override;
  size_t num_targets() const override;
  const string& target_name(size_t i) const override;
  const RunOptions& options() const override;
  string DebugString() const override;
  const RunStepRequest& ToProto() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
  void set_session_handle(const string& handle) override;
  void set_partial_run_handle(const string& handle) override;
  void add_feed(const string& name, const Tensor& value) override;
  void add_fetch(const string& name) override;
  void add_target(const string& name) override;
  RunOptions* mutable_options() override;
  void set_store_errors_in_response_body(bool store_errors) override;
 private:
  string session_handle_;
  string partial_run_handle_;
  gtl::InlinedVector<std::pair<string, Tensor>, 4> feeds_;
  gtl::InlinedVector<string, 4> fetches_;
  gtl::InlinedVector<string, 4> targets_;
  RunOptions options_;
  bool store_errors_in_response_body_ = false;
  // Lazily-built proto representation, cached by ToProto().
  mutable std::unique_ptr<RunStepRequest> proto_version_;
};
// Mutable RunStep request backed by an owned RunStepRequest proto; suited
// to RPC paths where the proto will be serialized.
class MutableProtoRunStepRequest : public MutableRunStepRequestWrapper {
 public:
  const string& session_handle() const override;
  const string& partial_run_handle() const override;
  size_t num_feeds() const override;
  const string& feed_name(size_t i) const override;
  Status FeedValue(size_t i, Tensor* out_tensor) const override;
  Status FeedValue(size_t i, TensorProto* out_tensor) const override;
  size_t num_fetches() const override;
  const string& fetch_name(size_t i) const override;
  size_t num_targets() const override;
  const string& target_name(size_t i) const override;
  const RunOptions& options() const override;
  string DebugString() const override;
  const RunStepRequest& ToProto() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
  void set_session_handle(const string& handle) override;
  void set_partial_run_handle(const string& handle) override;
  void add_feed(const string& name, const Tensor& value) override;
  void add_fetch(const string& name) override;
  void add_target(const string& name) override;
  RunOptions* mutable_options() override;
  void set_store_errors_in_response_body(bool store_errors) override;
 private:
  RunStepRequest request_;
  // MasterInterface needs direct access to the underlying proto.
  friend class MasterInterface;
};
// Read-only RunStep request view over a borrowed, unowned RunStepRequest
// proto; the caller must keep the proto alive for the wrapper's lifetime.
class ProtoRunStepRequest : public RunStepRequestWrapper {
 public:
  ProtoRunStepRequest(const RunStepRequest* request);
  const string& session_handle() const override;
  const string& partial_run_handle() const override;
  size_t num_feeds() const override;
  const string& feed_name(size_t i) const override;
  Status FeedValue(size_t i, Tensor* out_tensor) const override;
  Status FeedValue(size_t i, TensorProto* out_tensor) const override;
  size_t num_fetches() const override;
  const string& fetch_name(size_t i) const override;
  size_t num_targets() const override;
  const string& target_name(size_t i) const override;
  const RunOptions& options() const override;
  string DebugString() const override;
  const RunStepRequest& ToProto() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
 private:
  const RunStepRequest* const request_;  // not owned
};
// Read-only abstract view of a RunGraph request (the worker-level analogue
// of a RunStep request, keyed by a registered graph handle).
class RunGraphRequestWrapper {
 public:
  virtual ~RunGraphRequestWrapper() {}
  virtual const string& session_handle() const = 0;
  virtual bool create_worker_session_called() const = 0;
  virtual const string& graph_handle() const = 0;
  virtual int64_t step_id() const = 0;
  virtual const ExecutorOpts& exec_opts() const = 0;
  // Sends: tensors pushed into the graph's rendezvous, addressable by index.
  virtual size_t num_sends() const = 0;
  virtual const string& send_key(size_t i) const = 0;
  virtual Status SendValue(size_t i, Tensor* out_tensor) const = 0;
  // Recvs: rendezvous keys of tensors the caller wants back.
  virtual size_t num_recvs() const = 0;
  virtual const string& recv_key(size_t i) const = 0;
  // Partial-run flags.
  virtual bool is_partial() const = 0;
  virtual bool is_last_partial_run() const = 0;
  virtual bool store_errors_in_response_body() const = 0;
  virtual int64_t request_id() const = 0;
  virtual const RunGraphRequest& ToProto() const = 0;
};
// Mutable extension of RunGraphRequestWrapper used when building a request.
class MutableRunGraphRequestWrapper : public RunGraphRequestWrapper {
 public:
  virtual void set_session_handle(const string& handle) = 0;
  virtual void set_create_worker_session_called(bool called) = 0;
  virtual void set_graph_handle(const string& handle) = 0;
  virtual void set_step_id(int64_t step_id) = 0;
  virtual ExecutorOpts* mutable_exec_opts() = 0;
  // Copies feed i of a RunStep request in as the send for `send_key`.
  virtual Status AddSendFromRunStepRequest(
      const RunStepRequestWrapper& run_step_request, size_t i,
      const string& send_key) = 0;
  // Copies feed i of a RunCallable request in as the send for `send_key`.
  virtual Status AddSendFromRunCallableRequest(
      const RunCallableRequest& run_callable_request, size_t i,
      const string& send_key) = 0;
  virtual void add_recv_key(const string& recv_key) = 0;
  virtual void set_is_partial(bool is_partial) = 0;
  virtual void set_is_last_partial_run(bool is_last_partial_run) = 0;
  virtual void set_store_errors_in_response_body(bool store_errors) = 0;
  virtual void set_request_id(int64_t request_id) = 0;
};
// RunGraph request stored as in-process data structures; suited to
// in-process worker paths. ToProto() materializes and caches a proto copy.
class InMemoryRunGraphRequest : public MutableRunGraphRequestWrapper {
 public:
  const string& session_handle() const override;
  const string& graph_handle() const override;
  bool create_worker_session_called() const override;
  int64_t step_id() const override;
  const ExecutorOpts& exec_opts() const override;
  size_t num_sends() const override;
  const string& send_key(size_t i) const override;
  Status SendValue(size_t i, Tensor* out_tensor) const override;
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  bool is_partial() const override;
  bool is_last_partial_run() const override;
  const RunGraphRequest& ToProto() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
  void set_session_handle(const string& handle) override;
  void set_create_worker_session_called(bool called) override;
  void set_graph_handle(const string& handle) override;
  void set_step_id(int64_t step_id) override;
  ExecutorOpts* mutable_exec_opts() override;
  Status AddSendFromRunStepRequest(
      const RunStepRequestWrapper& run_step_request, size_t i,
      const string& send_key) override;
  Status AddSendFromRunCallableRequest(
      const RunCallableRequest& run_callable_request, size_t i,
      const string& send_key) override;
  void add_recv_key(const string& recv_key) override;
  void set_is_partial(bool is_partial) override;
  void set_is_last_partial_run(bool is_last_partial_run) override;
  void set_store_errors_in_response_body(bool store_errors) override;
  void set_request_id(int64_t request_id) override;
 private:
  string session_handle_;
  bool create_worker_session_called_ = false;
  string graph_handle_;
  int64_t step_id_;
  ExecutorOpts exec_opts_;
  gtl::InlinedVector<std::pair<string, Tensor>, 4> sends_;
  gtl::InlinedVector<string, 4> recvs_;
  bool is_partial_ = false;
  bool is_last_partial_run_ = false;
  bool store_errors_in_response_body_ = false;
  int64_t request_id_ = 0;
  // Lazily-built proto representation, cached by ToProto().
  mutable std::unique_ptr<RunGraphRequest> proto_version_;
};
// Mutable RunGraph request backed by an owned RunGraphRequest proto; suited
// to RPC paths where the proto will be serialized.
class MutableProtoRunGraphRequest : public MutableRunGraphRequestWrapper {
 public:
  const string& session_handle() const override;
  bool create_worker_session_called() const override;
  const string& graph_handle() const override;
  int64_t step_id() const override;
  const ExecutorOpts& exec_opts() const override;
  size_t num_sends() const override;
  const string& send_key(size_t i) const override;
  Status SendValue(size_t i, Tensor* out_tensor) const override;
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  bool is_partial() const override;
  bool is_last_partial_run() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
  const RunGraphRequest& ToProto() const override;
  void set_session_handle(const string& handle) override;
  void set_create_worker_session_called(bool called) override;
  void set_graph_handle(const string& handle) override;
  void set_step_id(int64_t step_id) override;
  ExecutorOpts* mutable_exec_opts() override;
  Status AddSendFromRunStepRequest(
      const RunStepRequestWrapper& run_step_request, size_t i,
      const string& send_key) override;
  Status AddSendFromRunCallableRequest(
      const RunCallableRequest& run_callable_request, size_t i,
      const string& send_key) override;
  void add_recv_key(const string& recv_key) override;
  void set_is_partial(bool is_partial) override;
  void set_is_last_partial_run(bool is_last_partial_run) override;
  void set_store_errors_in_response_body(bool store_errors) override;
  void set_request_id(int64_t request_id) override;
 private:
  RunGraphRequest request_;
};
// Read-only RunGraph request view over a borrowed, unowned RunGraphRequest
// proto; the caller must keep the proto alive for the wrapper's lifetime.
class ProtoRunGraphRequest : public RunGraphRequestWrapper {
 public:
  ProtoRunGraphRequest(const RunGraphRequest* request);
  const string& session_handle() const override;
  bool create_worker_session_called() const override;
  const string& graph_handle() const override;
  int64_t step_id() const override;
  const ExecutorOpts& exec_opts() const override;
  size_t num_sends() const override;
  const string& send_key(size_t i) const override;
  Status SendValue(size_t i, Tensor* out_tensor) const override;
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  bool is_partial() const override;
  bool is_last_partial_run() const override;
  bool store_errors_in_response_body() const override;
  int64_t request_id() const override;
  const RunGraphRequest& ToProto() const override;
 private:
  const RunGraphRequest* const request_;  // not owned
};
// Abstract mutable view of a RunGraph response: received tensors plus
// execution metadata (step stats, cost graph, partition graphs) and status.
class MutableRunGraphResponseWrapper {
 public:
  virtual ~MutableRunGraphResponseWrapper() {}
  virtual size_t num_recvs() const = 0;
  virtual const string& recv_key(size_t i) const = 0;
  virtual Status RecvValue(size_t i, TensorProto* out_tensor) = 0;
  virtual Status RecvValue(size_t i, Tensor* out_tensor) = 0;
  virtual void AddRecv(const string& key, const Tensor& value) = 0;
  virtual StepStats* mutable_step_stats() = 0;
  virtual CostGraphDef* mutable_cost_graph() = 0;
  virtual size_t num_partition_graphs() const = 0;
  virtual GraphDef* mutable_partition_graph(size_t i) = 0;
  virtual void AddPartitionGraph(const GraphDef& partition_graph) = 0;
  virtual Status status() const = 0;
  virtual absl::StatusCode status_code() const = 0;
  virtual void set_status(const Status& status) = 0;
 protected:
  // Underlying proto, exposed only to WorkerInterface.
  virtual RunGraphResponse* get_proto() = 0;
  friend class WorkerInterface;
};
// RunGraph response stored as in-process data structures (Tensor objects);
// suited to in-process worker paths.
class InMemoryRunGraphResponse : public MutableRunGraphResponseWrapper {
 public:
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  Status RecvValue(size_t i, TensorProto* out_tensor) override;
  Status RecvValue(size_t i, Tensor* out_tensor) override;
  void AddRecv(const string& key, const Tensor& value) override;
  StepStats* mutable_step_stats() override;
  CostGraphDef* mutable_cost_graph() override;
  size_t num_partition_graphs() const override;
  GraphDef* mutable_partition_graph(size_t i) override;
  void AddPartitionGraph(const GraphDef& partition_graph) override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunGraphResponse* get_proto() override;
 private:
  gtl::InlinedVector<std::pair<string, Tensor>, 4> recvs_;
  StepStats step_stats_;
  CostGraphDef cost_graph_;
  std::vector<GraphDef> partition_graphs_;
  Status status_;
};
// RunGraph response backed by an owned RunGraphResponse proto.
class OwnedProtoRunGraphResponse : public MutableRunGraphResponseWrapper {
 public:
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  Status RecvValue(size_t i, TensorProto* out_tensor) override;
  Status RecvValue(size_t i, Tensor* out_tensor) override;
  void AddRecv(const string& key, const Tensor& value) override;
  StepStats* mutable_step_stats() override;
  CostGraphDef* mutable_cost_graph() override;
  size_t num_partition_graphs() const override;
  GraphDef* mutable_partition_graph(size_t i) override;
  void AddPartitionGraph(const GraphDef& partition_graph) override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunGraphResponse* get_proto() override;
 private:
  RunGraphResponse response_;
};
// RunGraph response backed by a borrowed, unowned RunGraphResponse proto;
// the caller must keep the proto alive for the wrapper's lifetime.
class NonOwnedProtoRunGraphResponse : public MutableRunGraphResponseWrapper {
 public:
  NonOwnedProtoRunGraphResponse(RunGraphResponse* response);
  size_t num_recvs() const override;
  const string& recv_key(size_t i) const override;
  Status RecvValue(size_t i, TensorProto* out_tensor) override;
  Status RecvValue(size_t i, Tensor* out_tensor) override;
  void AddRecv(const string& key, const Tensor& value) override;
  StepStats* mutable_step_stats() override;
  CostGraphDef* mutable_cost_graph() override;
  size_t num_partition_graphs() const override;
  GraphDef* mutable_partition_graph(size_t i) override;
  void AddPartitionGraph(const GraphDef& partition_graph) override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunGraphResponse* get_proto() override;
 private:
  RunGraphResponse* const response_;  // not owned
};
// Abstract mutable view of a RunStep response: fetched tensors plus run
// metadata and status.
class MutableRunStepResponseWrapper {
 public:
  virtual ~MutableRunStepResponseWrapper();
  virtual size_t num_tensors() const = 0;
  virtual const string& tensor_name(size_t i) const = 0;
  virtual Status TensorValue(size_t i, Tensor* out_tensor) const = 0;
  // Moves/copies recv i of a RunGraph response in as tensor `name`.
  virtual Status AddTensorFromRunGraphResponse(
      const string& name, MutableRunGraphResponseWrapper* run_graph_response,
      size_t i) = 0;
  virtual const RunMetadata& metadata() const = 0;
  virtual RunMetadata* mutable_metadata() = 0;
  virtual Status status() const = 0;
  virtual absl::StatusCode status_code() const = 0;
  virtual void set_status(const Status& status) = 0;
 protected:
  // Underlying proto, exposed only to MasterInterface.
  virtual RunStepResponse* get_proto() = 0;
  friend class MasterInterface;
};
// RunStep response stored as in-process data structures (Tensor objects);
// suited to in-process master paths.
class InMemoryRunStepResponse : public MutableRunStepResponseWrapper {
 public:
  size_t num_tensors() const override;
  const string& tensor_name(size_t i) const override;
  Status TensorValue(size_t i, Tensor* out_tensor) const override;
  Status AddTensorFromRunGraphResponse(
      const string& name, MutableRunGraphResponseWrapper* run_graph_response,
      size_t i) override;
  const RunMetadata& metadata() const override;
  RunMetadata* mutable_metadata() override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunStepResponse* get_proto() override;
 private:
  gtl::InlinedVector<std::pair<string, Tensor>, 4> tensors_;
  RunMetadata metadata_;
  Status status_;
};
// RunStep response backed by an owned RunStepResponse proto.
class OwnedProtoRunStepResponse : public MutableRunStepResponseWrapper {
 public:
  size_t num_tensors() const override;
  const string& tensor_name(size_t i) const override;
  Status TensorValue(size_t i, Tensor* out_tensor) const override;
  Status AddTensorFromRunGraphResponse(
      const string& name, MutableRunGraphResponseWrapper* run_graph_response,
      size_t i) override;
  const RunMetadata& metadata() const override;
  RunMetadata* mutable_metadata() override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunStepResponse* get_proto() override;
 private:
  RunStepResponse response_;
};
// MutableRunStepResponseWrapper around a caller-owned RunStepResponse proto;
// the pointee must outlive this wrapper.
class NonOwnedProtoRunStepResponse : public MutableRunStepResponseWrapper {
 public:
  NonOwnedProtoRunStepResponse(RunStepResponse* response);
  // MutableRunStepResponseWrapper methods.
  size_t num_tensors() const override;
  const string& tensor_name(size_t i) const override;
  Status TensorValue(size_t i, Tensor* out_tensor) const override;
  Status AddTensorFromRunGraphResponse(
      const string& name, MutableRunGraphResponseWrapper* run_graph_response,
      size_t i) override;
  const RunMetadata& metadata() const override;
  RunMetadata* mutable_metadata() override;
  Status status() const override;
  absl::StatusCode status_code() const override;
  void set_status(const Status& status) override;
 protected:
  RunStepResponse* get_proto() override;
 private:
  // Not owned.
  RunStepResponse* response_;
};
// Parses `tensor_proto` into `*out_tensor`. Returns false if the proto's
// dtype is out of range or the tensor contents fail to parse.
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
                              Tensor* out_tensor);
}
#endif
#include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/named_tensor.pb.h"
namespace tensorflow {
// Parses `tensor_proto` into `*out_tensor`.
//
// Returns true on success. Returns false — leaving `*out_tensor` untouched —
// when the proto's dtype is outside (0, DataType_MAX] or when Tensor parsing
// from the proto fails.
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
                              Tensor* out_tensor) {
  // Guard clause: reject DT_INVALID (0) and anything beyond the enum range.
  if (tensor_proto.dtype() <= 0 || tensor_proto.dtype() > DataType_MAX) {
    return false;
  }
  Tensor parsed(tensor_proto.dtype());
  if (!parsed.FromProto(cpu_allocator(), tensor_proto)) {
    return false;
  }
  *out_tensor = parsed;
  return true;
}
// Simple field accessors for the in-memory RunStep request representation.
const string& InMemoryRunStepRequest::session_handle() const {
  return session_handle_;
}
void InMemoryRunStepRequest::set_session_handle(const string& handle) {
  session_handle_ = handle;
}
const string& InMemoryRunStepRequest::partial_run_handle() const {
  return partial_run_handle_;
}
void InMemoryRunStepRequest::set_partial_run_handle(const string& handle) {
  partial_run_handle_ = handle;
}
size_t InMemoryRunStepRequest::num_feeds() const { return feeds_.size(); }
// NOTE: feed accessors index feeds_ directly with no bounds check; callers
// must pass i < num_feeds().
const string& InMemoryRunStepRequest::feed_name(size_t i) const {
  return feeds_[i].first;
}
// Copies the i-th feed tensor into `*out_tensor`; always returns OK.
Status InMemoryRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
  *out_tensor = feeds_[i].second;
  return absl::OkStatus();
}
Status InMemoryRunStepRequest::FeedValue(size_t i, | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace {
// Test fixtures: two small int32 tensors with distinct shapes and values so
// that mix-ups between them are detectable.
Tensor TensorA() {
  Tensor a_tensor(DT_INT32, TensorShape({2, 2}));
  test::FillValues<int32>(&a_tensor, {3, 2, -1, 0});
  return a_tensor;
}
Tensor TensorB() {
  Tensor b_tensor(DT_INT32, TensorShape({1, 2}));
  test::FillValues<int32>(&b_tensor, {1, 2});
  return b_tensor;
}
// Populates `request` with a fixed set of feeds, fetches, targets and
// options; CheckRunStepRequest() verifies exactly this set.
void BuildRunStepRequest(MutableRunStepRequestWrapper* request) {
  request->set_session_handle("handle");
  request->set_partial_run_handle("partial_handle");
  request->add_feed("feed_a:0", TensorA());
  request->add_feed("feed_b:0", TensorB());
  request->add_fetch("fetch_x:0");
  request->add_fetch("fetch_y:0");
  request->add_target("target_i");
  request->add_target("target_j");
  request->mutable_options()->set_timeout_in_ms(37);
}
// Asserts that `request` round-trips everything BuildRunStepRequest() set.
void CheckRunStepRequest(const RunStepRequestWrapper& request) {
  EXPECT_EQ("handle", request.session_handle());
  EXPECT_EQ("partial_handle", request.partial_run_handle());
  EXPECT_EQ(2, request.num_feeds());
  EXPECT_EQ("feed_a:0", request.feed_name(0));
  EXPECT_EQ("feed_b:0", request.feed_name(1));
  Tensor val;
  TF_EXPECT_OK(request.FeedValue(0, &val));
  test::ExpectTensorEqual<int32>(TensorA(), val);
  TF_EXPECT_OK(request.FeedValue(1, &val));
  test::ExpectTensorEqual<int32>(TensorB(), val);
  EXPECT_EQ(2, request.num_fetches());
  EXPECT_EQ("fetch_x:0", request.fetch_name(0));
  EXPECT_EQ("fetch_y:0", request.fetch_name(1));
  EXPECT_EQ("target_i", request.target_name(0));
  EXPECT_EQ("target_j", request.target_name(1));
  EXPECT_EQ(37, request.options().timeout_in_ms());
}
// Derives a RunGraph request from `run_step_request`, forwarding its two
// feeds as sends and registering two recv keys.
void BuildRunGraphRequest(const RunStepRequestWrapper& run_step_request,
                          MutableRunGraphRequestWrapper* run_graph_request) {
  run_graph_request->set_graph_handle("graph_handle");
  run_graph_request->set_step_id(13);
  run_graph_request->mutable_exec_opts()->set_record_timeline(true);
  TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 0,
                                                            "send_0"));
  TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 1,
                                                            "send_1"));
  run_graph_request->add_recv_key("recv_2");
  run_graph_request->add_recv_key("recv_3");
  run_graph_request->set_is_partial(true);
}
// Asserts the fields BuildRunGraphRequest() set, including that untouched
// exec options remain false.
void CheckRunGraphRequest(const RunGraphRequestWrapper& request) {
  EXPECT_EQ("graph_handle", request.graph_handle());
  EXPECT_EQ(13, request.step_id());
  EXPECT_FALSE(request.exec_opts().record_costs());
  EXPECT_TRUE(request.exec_opts().record_timeline());
  EXPECT_FALSE(request.exec_opts().record_partition_graphs());
  EXPECT_EQ(2, request.num_sends());
  Tensor val;
  TF_EXPECT_OK(request.SendValue(0, &val));
  test::ExpectTensorEqual<int32>(TensorA(), val);
  TF_EXPECT_OK(request.SendValue(1, &val));
  test::ExpectTensorEqual<int32>(TensorB(), val);
  EXPECT_TRUE(request.is_partial());
  EXPECT_FALSE(request.is_last_partial_run());
}
// Fills a RunGraph response with two recvs, step stats, a cost graph node,
// and one partition graph.
void BuildRunGraphResponse(MutableRunGraphResponseWrapper* run_graph_response) {
  run_graph_response->AddRecv("recv_2", TensorA());
  run_graph_response->AddRecv("recv_3", TensorB());
  run_graph_response->mutable_step_stats()->add_dev_stats()->set_device(
      "/cpu:0");
  run_graph_response->mutable_cost_graph()->add_node()->set_name("cost_node");
  GraphDef graph_def;
  graph_def.mutable_versions()->set_producer(1234);
  graph_def.mutable_versions()->set_min_consumer(1234);
  run_graph_response->AddPartitionGraph(graph_def);
}
// Asserts everything BuildRunGraphResponse() stored.
void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
  ASSERT_EQ(2, response->num_recvs());
  EXPECT_EQ("recv_2", response->recv_key(0));
  EXPECT_EQ("recv_3", response->recv_key(1));
  Tensor val;
  TF_EXPECT_OK(response->RecvValue(0, &val));
  test::ExpectTensorEqual<int32>(TensorA(), val);
  TF_EXPECT_OK(response->RecvValue(1, &val));
  test::ExpectTensorEqual<int32>(TensorB(), val);
  ASSERT_EQ(1, response->mutable_step_stats()->dev_stats_size());
  EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device());
  ASSERT_EQ(1, response->mutable_cost_graph()->node_size());
  EXPECT_EQ("cost_node", response->mutable_cost_graph()->node(0).name());
  ASSERT_EQ(1, response->num_partition_graphs());
  EXPECT_EQ(1234, response->mutable_partition_graph(0)->versions().producer());
  EXPECT_EQ(1234,
            response->mutable_partition_graph(0)->versions().min_consumer());
}
// Builds a RunStep response from a RunGraph response: maps its two recvs to
// the fetches and moves step stats / partition graphs into the metadata.
void BuildRunStepResponse(MutableRunGraphResponseWrapper* run_graph_response,
                          MutableRunStepResponseWrapper* run_step_response) {
  TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
      "fetch_x:0", run_graph_response, 0));
  TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
      "fetch_y:0", run_graph_response, 1));
  *run_step_response->mutable_metadata()->mutable_step_stats() =
      *run_graph_response->mutable_step_stats();
  protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs =
      run_step_response->mutable_metadata()->mutable_partition_graphs();
  for (size_t i = 0; i < run_graph_response->num_partition_graphs(); i++) {
    // Swap rather than copy: the source response is not reused afterwards.
    partition_graph_defs->Add()->Swap(
        run_graph_response->mutable_partition_graph(i));
  }
}
// Asserts everything BuildRunStepResponse() stored.
void CheckRunStepResponse(const MutableRunStepResponseWrapper& response) {
  ASSERT_EQ(2, response.num_tensors());
  EXPECT_EQ("fetch_x:0", response.tensor_name(0));
  EXPECT_EQ("fetch_y:0", response.tensor_name(1));
  Tensor val;
  TF_EXPECT_OK(response.TensorValue(0, &val));
  test::ExpectTensorEqual<int32>(TensorA(), val);
  TF_EXPECT_OK(response.TensorValue(1, &val));
  test::ExpectTensorEqual<int32>(TensorB(), val);
  ASSERT_EQ(1, response.metadata().step_stats().dev_stats_size());
  EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device());
  ASSERT_EQ(1, response.metadata().partition_graphs_size());
  EXPECT_EQ(1234,
            response.metadata().partition_graphs(0).versions().producer());
  EXPECT_EQ(1234,
            response.metadata().partition_graphs(0).versions().min_consumer());
}
// Round-trips a RunStep request through every wrapper implementation,
// including conversion to the immutable proto-backed view.
TEST(MessageWrappers, RunStepRequest_Basic) {
  InMemoryRunStepRequest in_memory_request;
  BuildRunStepRequest(&in_memory_request);
  CheckRunStepRequest(in_memory_request);
  MutableProtoRunStepRequest proto_request;
  BuildRunStepRequest(&proto_request);
  CheckRunStepRequest(proto_request);
  CheckRunStepRequest(ProtoRunStepRequest(&in_memory_request.ToProto()));
  CheckRunStepRequest(ProtoRunStepRequest(&proto_request.ToProto()));
}
// Exercises every (RunStep request impl) x (RunGraph request impl)
// combination when deriving a RunGraph request.
TEST(MessageWrappers, RunGraphRequest_Basic) {
  InMemoryRunStepRequest in_memory_run_step_request;
  BuildRunStepRequest(&in_memory_run_step_request);
  MutableProtoRunStepRequest mutable_proto_run_step_request;
  BuildRunStepRequest(&mutable_proto_run_step_request);
  ProtoRunStepRequest proto_run_step_request(
      &mutable_proto_run_step_request.ToProto());
  {
    InMemoryRunGraphRequest request;
    BuildRunGraphRequest(in_memory_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
  {
    InMemoryRunGraphRequest request;
    BuildRunGraphRequest(mutable_proto_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
  {
    InMemoryRunGraphRequest request;
    BuildRunGraphRequest(proto_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
  {
    MutableProtoRunGraphRequest request;
    BuildRunGraphRequest(in_memory_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
  {
    MutableProtoRunGraphRequest request;
    BuildRunGraphRequest(mutable_proto_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
  {
    MutableProtoRunGraphRequest request;
    BuildRunGraphRequest(proto_run_step_request, &request);
    CheckRunGraphRequest(request);
    CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
  }
}
// Builds and verifies a RunGraph response through all three implementations.
TEST(MessageWrappers, RunGraphResponse_Basic) {
  InMemoryRunGraphResponse in_memory_response;
  BuildRunGraphResponse(&in_memory_response);
  CheckRunGraphResponse(&in_memory_response);
  OwnedProtoRunGraphResponse owned_proto_response;
  BuildRunGraphResponse(&owned_proto_response);
  CheckRunGraphResponse(&owned_proto_response);
  RunGraphResponse response_proto;
  NonOwnedProtoRunGraphResponse non_owned_proto_response(&response_proto);
  BuildRunGraphResponse(&non_owned_proto_response);
  CheckRunGraphResponse(&non_owned_proto_response);
}
// Exercises every (RunGraph response impl) x (RunStep response impl)
// combination when converting graph results into step results.
TEST(MessageWrappers, RunStepResponse_Basic) {
  {
    InMemoryRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    InMemoryRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    InMemoryRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    OwnedProtoRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    InMemoryRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    RunStepResponse response_proto;
    NonOwnedProtoRunStepResponse response(&response_proto);
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    OwnedProtoRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    InMemoryRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    OwnedProtoRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    OwnedProtoRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    OwnedProtoRunGraphResponse run_graph_response;
    BuildRunGraphResponse(&run_graph_response);
    RunStepResponse response_proto;
    NonOwnedProtoRunStepResponse response(&response_proto);
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    RunGraphResponse run_graph_response_proto;
    NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
    BuildRunGraphResponse(&run_graph_response);
    InMemoryRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    RunGraphResponse run_graph_response_proto;
    NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
    BuildRunGraphResponse(&run_graph_response);
    OwnedProtoRunStepResponse response;
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
  {
    RunGraphResponse run_graph_response_proto;
    NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
    BuildRunGraphResponse(&run_graph_response);
    RunStepResponse response_proto;
    NonOwnedProtoRunStepResponse response(&response_proto);
    BuildRunStepResponse(&run_graph_response, &response);
    CheckRunStepResponse(response);
  }
}
}
} |
16 | #include "gmock/gmock-cardinalities.h"
#include <limits.h>
#include <ostream>
#include <sstream>
#include <string>
#include "gmock/internal/gmock-internal-utils.h"
#include "gtest/gtest.h"
namespace testing {
namespace {
// Implements the Between(m, n) cardinality: a call count is accepted iff it
// lies in the closed interval [min_, max_].
class BetweenCardinalityImpl : public CardinalityInterface {
 public:
  // Clamps invalid bounds to a usable range (negative -> 0, max < min ->
  // min) and reports a non-fatal test failure describing the bad argument.
  BetweenCardinalityImpl(int min, int max)
      : min_(min >= 0 ? min : 0), max_(max >= min_ ? max : min_) {
    std::stringstream ss;
    if (min < 0) {
      ss << "The invocation lower bound must be >= 0, "
         << "but is actually " << min << ".";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    } else if (max < 0) {
      ss << "The invocation upper bound must be >= 0, "
         << "but is actually " << max << ".";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    } else if (min > max) {
      ss << "The invocation upper bound (" << max
         << ") must be >= the invocation lower bound (" << min << ").";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    }
  }
  // Conservative estimates equal the exact bounds for this cardinality.
  int ConservativeLowerBound() const override { return min_; }
  int ConservativeUpperBound() const override { return max_; }
  bool IsSatisfiedByCallCount(int call_count) const override {
    return min_ <= call_count && call_count <= max_;
  }
  // Saturated means no further calls are allowed.
  bool IsSaturatedByCallCount(int call_count) const override {
    return call_count >= max_;
  }
  void DescribeTo(::std::ostream* os) const override;
 private:
  const int min_;
  const int max_;
  BetweenCardinalityImpl(const BetweenCardinalityImpl&) = delete;
  BetweenCardinalityImpl& operator=(const BetweenCardinalityImpl&) = delete;
};
// Formats an invocation count in English: "once", "twice", or "<n> times"
// for every other value (including 0 and negatives).
inline std::string FormatTimes(int n) {
  switch (n) {
    case 1:
      return "once";
    case 2:
      return "twice";
    default: {
      std::stringstream count_stream;
      count_stream << n << " times";
      return count_stream.str();
    }
  }
}
// Describes the cardinality in English, choosing special phrasings for the
// degenerate intervals: [0,0] -> "never called", [0,INT_MAX] -> "any number
// of times", [n,n] -> exact count, [n,INT_MAX] -> "at least".
void BetweenCardinalityImpl::DescribeTo(::std::ostream* os) const {
  if (min_ == 0) {
    if (max_ == 0) {
      *os << "never called";
    } else if (max_ == INT_MAX) {
      *os << "called any number of times";
    } else {
      *os << "called at most " << FormatTimes(max_);
    }
  } else if (min_ == max_) {
    *os << "called " << FormatTimes(min_);
  } else if (max_ == INT_MAX) {
    *os << "called at least " << FormatTimes(min_);
  } else {
    *os << "called between " << min_ << " and " << max_ << " times";
  }
}
}
// Describes an observed call count ("never called" / "called <n> times").
void Cardinality::DescribeActualCallCountTo(int actual_call_count,
                                            ::std::ostream* os) {
  if (actual_call_count > 0) {
    *os << "called " << FormatTimes(actual_call_count);
  } else {
    *os << "never called";
  }
}
// All factory functions are expressed in terms of Between(min, max).
GTEST_API_ Cardinality AtLeast(int n) { return Between(n, INT_MAX); }
GTEST_API_ Cardinality AtMost(int n) { return Between(0, n); }
GTEST_API_ Cardinality AnyNumber() { return AtLeast(0); }
GTEST_API_ Cardinality Between(int min, int max) {
  return Cardinality(new BetweenCardinalityImpl(min, max));
}
GTEST_API_ Cardinality Exactly(int n) { return Between(n, n); }
} | #include <ostream>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
namespace {
using std::stringstream;
using testing::AnyNumber;
using testing::AtLeast;
using testing::AtMost;
using testing::Between;
using testing::Cardinality;
using testing::CardinalityInterface;
using testing::Exactly;
using testing::IsSubstring;
using testing::MakeCardinality;
// Minimal mock used by the cardinality tests; non-copyable as is
// conventional for mocks.
class MockFoo {
 public:
  MockFoo() = default;
  MOCK_METHOD0(Bar, int());
 private:
  MockFoo(const MockFoo&) = delete;
  MockFoo& operator=(const MockFoo&) = delete;
};
// --- Cardinality value-semantics tests ---
TEST(CardinalityTest, IsDefaultConstructable) { Cardinality c; }
TEST(CardinalityTest, IsCopyable) {
  Cardinality c = Exactly(1);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));
  c = Exactly(2);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));
}
TEST(CardinalityTest, IsOverSaturatedByCallCountWorks) {
  const Cardinality c = AtMost(5);
  EXPECT_FALSE(c.IsOverSaturatedByCallCount(4));
  EXPECT_FALSE(c.IsOverSaturatedByCallCount(5));
  EXPECT_TRUE(c.IsOverSaturatedByCallCount(6));
}
// Verifies the "once"/"twice"/"n times" phrasing of actual call counts.
TEST(CardinalityTest, CanDescribeActualCallCount) {
  stringstream ss0;
  Cardinality::DescribeActualCallCountTo(0, &ss0);
  EXPECT_EQ("never called", ss0.str());
  stringstream ss1;
  Cardinality::DescribeActualCallCountTo(1, &ss1);
  EXPECT_EQ("called once", ss1.str());
  stringstream ss2;
  Cardinality::DescribeActualCallCountTo(2, &ss2);
  EXPECT_EQ("called twice", ss2.str());
  stringstream ss3;
  Cardinality::DescribeActualCallCountTo(3, &ss3);
  EXPECT_EQ("called 3 times", ss3.str());
}
// --- AnyNumber() ---
TEST(AnyNumber, Works) {
  const Cardinality c = AnyNumber();
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(9));
  EXPECT_FALSE(c.IsSaturatedByCallCount(9));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called any number of times", ss.str());
}
TEST(AnyNumberTest, HasCorrectBounds) {
  const Cardinality c = AnyNumber();
  EXPECT_EQ(0, c.ConservativeLowerBound());
  EXPECT_EQ(INT_MAX, c.ConservativeUpperBound());
}
// --- AtLeast(n) ---
TEST(AtLeastTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        AtLeast(-1);
      },
      "The invocation lower bound must be >= 0");
}
TEST(AtLeastTest, OnZero) {
  const Cardinality c = AtLeast(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "any number of times", ss.str());
}
TEST(AtLeastTest, OnPositiveNumber) {
  const Cardinality c = AtLeast(2);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));
  stringstream ss1;
  AtLeast(1).DescribeTo(&ss1);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least once", ss1.str());
  stringstream ss2;
  c.DescribeTo(&ss2);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least twice", ss2.str());
  stringstream ss3;
  AtLeast(3).DescribeTo(&ss3);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least 3 times", ss3.str());
}
TEST(AtLeastTest, HasCorrectBounds) {
  const Cardinality c = AtLeast(2);
  EXPECT_EQ(2, c.ConservativeLowerBound());
  EXPECT_EQ(INT_MAX, c.ConservativeUpperBound());
}
// --- AtMost(n) ---
TEST(AtMostTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        AtMost(-1);
      },
      "The invocation upper bound must be >= 0");
}
TEST(AtMostTest, OnZero) {
  const Cardinality c = AtMost(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}
TEST(AtMostTest, OnPositiveNumber) {
  const Cardinality c = AtMost(2);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));
  stringstream ss1;
  AtMost(1).DescribeTo(&ss1);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most once", ss1.str());
  stringstream ss2;
  c.DescribeTo(&ss2);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss2.str());
  stringstream ss3;
  AtMost(3).DescribeTo(&ss3);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most 3 times", ss3.str());
}
TEST(AtMostTest, HasCorrectBounds) {
  const Cardinality c = AtMost(2);
  EXPECT_EQ(0, c.ConservativeLowerBound());
  EXPECT_EQ(2, c.ConservativeUpperBound());
}
// --- Between(m, n), including argument validation ---
TEST(BetweenTest, OnNegativeStart) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(-1, 2);
      },
      "The invocation lower bound must be >= 0, but is actually -1");
}
TEST(BetweenTest, OnNegativeEnd) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(1, -2);
      },
      "The invocation upper bound must be >= 0, but is actually -2");
}
TEST(BetweenTest, OnStartBiggerThanEnd) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(2, 1);
      },
      "The invocation upper bound (1) must be >= "
      "the invocation lower bound (2)");
}
TEST(BetweenTest, OnZeroStartAndZeroEnd) {
  const Cardinality c = Between(0, 0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}
TEST(BetweenTest, OnZeroStartAndNonZeroEnd) {
  const Cardinality c = Between(0, 2);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(4));
  EXPECT_TRUE(c.IsSaturatedByCallCount(4));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss.str());
}
TEST(BetweenTest, OnSameStartAndEnd) {
  const Cardinality c = Between(3, 3);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(3));
  EXPECT_TRUE(c.IsSaturatedByCallCount(3));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(4));
  EXPECT_TRUE(c.IsSaturatedByCallCount(4));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss.str());
}
TEST(BetweenTest, OnDifferentStartAndEnd) {
  const Cardinality c = Between(3, 5);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(3));
  EXPECT_FALSE(c.IsSaturatedByCallCount(3));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(5));
  EXPECT_TRUE(c.IsSaturatedByCallCount(5));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(6));
  EXPECT_TRUE(c.IsSaturatedByCallCount(6));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called between 3 and 5 times", ss.str());
}
TEST(BetweenTest, HasCorrectBounds) {
  const Cardinality c = Between(3, 5);
  EXPECT_EQ(3, c.ConservativeLowerBound());
  EXPECT_EQ(5, c.ConservativeUpperBound());
}
// --- Exactly(n) ---
TEST(ExactlyTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        Exactly(-1);
      },
      "The invocation lower bound must be >= 0");
}
TEST(ExactlyTest, OnZero) {
  const Cardinality c = Exactly(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}
TEST(ExactlyTest, OnPositiveNumber) {
  const Cardinality c = Exactly(2);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));
  stringstream ss1;
  Exactly(1).DescribeTo(&ss1);
  EXPECT_PRED_FORMAT2(IsSubstring, "called once", ss1.str());
  stringstream ss2;
  c.DescribeTo(&ss2);
  EXPECT_PRED_FORMAT2(IsSubstring, "called twice", ss2.str());
  stringstream ss3;
  Exactly(3).DescribeTo(&ss3);
  EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss3.str());
}
TEST(ExactlyTest, HasCorrectBounds) {
  const Cardinality c = Exactly(3);
  EXPECT_EQ(3, c.ConservativeLowerBound());
  EXPECT_EQ(3, c.ConservativeUpperBound());
}
// User-defined cardinality: satisfied only by even call counts, never
// saturated.
class EvenCardinality : public CardinalityInterface {
 public:
  bool IsSatisfiedByCallCount(int call_count) const override {
    return (call_count % 2 == 0);
  }
  bool IsSaturatedByCallCount(int /* call_count */) const override {
    return false;
  }
  void DescribeTo(::std::ostream* ss) const override {
    *ss << "called even number of times";
  }
};
// Verifies that MakeCardinality wraps a user-supplied interface.
TEST(MakeCardinalityTest, ConstructsCardinalityFromInterface) {
  const Cardinality c = MakeCardinality(new EvenCardinality);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSatisfiedByCallCount(3));
  EXPECT_FALSE(c.IsSaturatedByCallCount(10000));
  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_EQ("called even number of times", ss.str());
}
} |
17 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_AND_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_AND_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
// stablehlo "and" element-wise binary op: logical AND for boolean (i1)
// tensors, bitwise AND for integer tensors.
struct AndOp {
  struct Attributes {};
};
AndOp Create(AndOp::Attributes);
// Validates operand/result shapes and element types before evaluation.
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                     Tensor& output);
// Computes output = lhs AND rhs element-wise.
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                      Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
// Element-wise functor selecting the AND flavor per element type: bitwise
// AND by default (integers), logical AND for booleans (kI1).
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
// The op is stateless; Create returns a default-constructed instance.
AndOp Create(AndOp::Attributes) { return {}; }
// Shape/type validation for stablehlo.and:
//  - operand and result shapes must be compatible (Propagate),
//  - operands must be boolean or integer tensors,
//  - lhs/rhs must share the output's baseline element type.
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                     Tensor& output) {
  SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSupportedTypes(CheckCtx("and"), lhs, IsBoolTensor, IsIntTensor));
  SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), lhs, output));
  SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), rhs, output));
  return absl::OkStatus();
}
// Applies the element-wise AND. Integer tensors dispatch on the concrete
// integer type via DISPATCH_INT (presumably expanding to a return on match —
// confirm against dispatch.h); boolean tensors use the logical functor
// directly. Any other element type is rejected, relying on Prepare() having
// been called first. `op` is unused: the op carries no state.
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                      Tensor& output) {
  if (IsIntTensor(lhs)) {
    And<DataType::kSI32> and_func;
    DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
                 and_func, lhs, rhs, output);
  } else if (IsBoolTensor(lhs)) {
    And<DataType::kI1> and_func;
    // NOTE(review): the result of EvaluateNoQuantization is discarded and OK
    // is returned unconditionally — confirm this is intentional.
    detail::EvaluateNoQuantization<DataType::kI1>(and_func, lhs, rhs, output);
    return absl::OkStatus();
  }
  return absl::FailedPreconditionError(
      "stablehlo.and: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
// Test-suite glue: name used by TestParamNames for this op.
template <>
struct ParamName<AndOp> {
  static std::string Get() { return "And"; }
};
// Reference functor mirroring the implementation: bitwise AND for ints,
// logical AND for booleans.
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
template <>
struct SupportedOpDataType<AndOp> {
  static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseOpShapePropagationTest,
                               AndOp, TestParamNames);
// NOTE(review): alias name is misspelled ("Multipy...Contraint"); kept as-is
// since it is referenced below.
using MultipyBaselineContraintTypes = BinaryElementwiseBaselineConstraintTypes<
    AndOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    And, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    MultipyBaselineContraintTypes, TestParamNames);
// Float and quantized tensors must be rejected by Prepare().
using UnsupportedTypes =
    WithOpTypes<AndOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
                                   PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseUnsupportedTypeTest,
                               UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct AndTest : ::testing::Test {};
TYPED_TEST_SUITE(AndTest, SupportedTypes, TestParamNames);
// End-to-end: Prepare + Evaluate over random data must match an element-wise
// application of the reference functor.
TYPED_TEST(AndTest, ArithmeticTestTypesTensorsWork) {
  using StorageT = typename TypeParam::StorageT;
  const Shape shape({2, 3, 4});
  Vector<StorageT> lhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
  Vector<StorageT> rhs_data =
      RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
  Vector<StorageT> output_data(shape.NumElements());
  Tensor lhs_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = lhs_data.data()};
  Tensor rhs_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = rhs_data.data()};
  Tensor output_tensor{
      .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
      .data = output_data.data()};
  Vector<StorageT> expected_data(shape.NumElements());
  absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
                    And<TypeParam::kStorage>());
  auto op = Create(AndOp::Attributes{});
  ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
  ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
  EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} |
18 | #ifndef I18N_ADDRESSINPUT_LOCALIZATION_H_
#define I18N_ADDRESSINPUT_LOCALIZATION_H_
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <string>
namespace i18n {
namespace addressinput {
struct AddressData;
// Provides localized strings and validation error messages for address
// input. By default strings come from the built-in English messages; a
// different getter can be installed with SetGetter().
class Localization {
 public:
  Localization(const Localization&) = delete;
  Localization& operator=(const Localization&) = delete;
  Localization();
  ~Localization() = default;
  // Returns the string for `message_id` via the installed getter.
  std::string GetString(int message_id) const;
  // Returns a human-readable message for `problem` on `field` of `address`.
  // `enable_examples` adds a postal-code example; `enable_links` adds a
  // postal-service URL (both apply to postal-code fields only).
  std::string GetErrorMessage(const AddressData& address,
                              AddressField field,
                              AddressProblem problem,
                              bool enable_examples,
                              bool enable_links) const;
  // Installs the string getter; must not be null.
  void SetGetter(std::string (*getter)(int));
 private:
  // Postal-code-specific message assembly used by GetErrorMessage().
  std::string GetErrorMessageForPostalCode(
      AddressProblem problem,
      bool uses_postal_code_as_label,
      const std::string& postal_code_example,
      const std::string& post_service_url) const;
  // Current string getter; never null.
  std::string (*get_string_)(int);
};
}
}
#endif
#include <libaddressinput/localization.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
#include "messages.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/string_split.h"
#include "util/string_util.h"
namespace {
// Appends the opening and closing HTML anchor tags for |url| onto
// |parameters| (two new elements: "<a href=\"url\">" and "</a>").
void PushBackUrl(const std::string& url, std::vector<std::string>* parameters) {
  assert(parameters != nullptr);
  std::string anchor_open = "<a href=\"";
  anchor_open.append(url);
  anchor_open.append("\">");
  parameters->push_back(anchor_open);
  parameters->emplace_back("</a>");
}
}
namespace i18n {
namespace addressinput {
namespace {
#include "en_messages.cc"
// Default getter: looks up the built-in English message table (included
// above from en_messages.cc) and maps unknown ids to the empty string.
std::string GetEnglishString(int message_id) {
  const char* str = GetString(message_id);
  return str != nullptr ? std::string(str) : std::string();
}
}
// English messages are the default until SetGetter() installs another source.
Localization::Localization() : get_string_(&GetEnglishString) {}
// Delegates to the installed getter (never null).
std::string Localization::GetString(int message_id) const {
  return get_string_(message_id);
}
// Builds the error message for `problem` on `field`.
//
// Postal-code fields get special treatment: the region's rule supplies an
// example value and a lookup URL (subject to the enable_* flags) and the
// label wording ("postal code" vs. "ZIP code"). All other fields map the
// problem directly to a message, substituting the offending value for
// UNKNOWN_VALUE.
std::string Localization::GetErrorMessage(const AddressData& address,
                                          AddressField field,
                                          AddressProblem problem,
                                          bool enable_examples,
                                          bool enable_links) const {
  if (field == POSTAL_CODE) {
    Rule rule;
    rule.CopyFrom(Rule::GetDefault());
    std::string postal_code_example, post_service_url;
    if (rule.ParseSerializedRule(
            RegionDataConstants::GetRegionData(address.region_code))) {
      if (enable_examples) {
        // The rule may list several comma-separated examples; show the first.
        std::vector<std::string> examples_list;
        SplitString(rule.GetPostalCodeExample(), ',', &examples_list);
        if (!examples_list.empty()) {
          postal_code_example = examples_list.front();
        }
      }
      if (enable_links) {
        post_service_url = rule.GetPostServiceUrl();
      }
    } else {
      // Region data for a known region code should always parse.
      assert(false);
    }
    bool uses_postal_code_as_label =
        rule.GetPostalCodeNameMessageId() ==
        IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL;
    return GetErrorMessageForPostalCode(problem, uses_postal_code_as_label,
                                        postal_code_example, post_service_url);
  } else {
    if (problem == MISSING_REQUIRED_FIELD) {
      return get_string_(IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD);
    } else if (problem == UNKNOWN_VALUE) {
      // Substitute the (first) offending value into the message placeholder.
      std::vector<std::string> parameters;
      if (AddressData::IsRepeatedFieldValue(field)) {
        const auto& values = address.GetRepeatedFieldValue(field);
        assert(!values.empty());
        parameters.push_back(values.front());
      } else {
        parameters.push_back(address.GetFieldValue(field));
      }
      return DoReplaceStringPlaceholders(
          get_string_(IDS_LIBADDRESSINPUT_UNKNOWN_VALUE), parameters);
    } else if (problem == USES_P_O_BOX) {
      return get_string_(IDS_LIBADDRESSINPUT_PO_BOX_FORBIDDEN_VALUE);
    } else {
      // Unhandled problem type for a non-postal-code field.
      assert(false);
      return "";
    }
  }
}
// Installs |getter| as the source of localized strings. Must not be nullptr.
void Localization::SetGetter(std::string (*getter)(int)) {
  assert(getter != nullptr);
  get_string_ = getter;
}
// Builds the POSTAL_CODE error message for |problem|. The wording depends on
// whether the region labels the field "postal code" or "ZIP code"
// (|uses_postal_code_as_label|), whether an example code is available, and
// whether a postal-service lookup URL may be embedded as an HTML link.
// NOTE(review): the checked-in text contained the mojibake "¶meters"
// (an HTML-entity corruption of "&parameters"), which does not compile;
// restored to take the address of the local vector.
std::string Localization::GetErrorMessageForPostalCode(
    AddressProblem problem,
    bool uses_postal_code_as_label,
    const std::string& postal_code_example,
    const std::string& post_service_url) const {
  int message_id;
  // Placeholder values substituted into the message template, in order.
  std::vector<std::string> parameters;
  switch (problem) {
    case MISSING_REQUIRED_FIELD:
      if (!postal_code_example.empty() && !post_service_url.empty()) {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE_AND_URL
            : IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE_AND_URL;
        parameters.push_back(postal_code_example);
        PushBackUrl(post_service_url, &parameters);
      } else if (!postal_code_example.empty()) {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE
            : IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE;
        parameters.push_back(postal_code_example);
      } else {
        message_id = IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD;
      }
      return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
    case INVALID_FORMAT:
      if (!postal_code_example.empty() && !post_service_url.empty()) {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE_AND_URL
            : IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE_AND_URL;
        parameters.push_back(postal_code_example);
        PushBackUrl(post_service_url, &parameters);
      } else if (!postal_code_example.empty()) {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE
            : IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE;
        parameters.push_back(postal_code_example);
      } else {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE
            : IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP;
      }
      return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
    case MISMATCHING_VALUE:
      // Mismatch messages never show an example, only (optionally) the link.
      if (!post_service_url.empty()) {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE_URL
            : IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP_URL;
        PushBackUrl(post_service_url, &parameters);
      } else {
        message_id = uses_postal_code_as_label
            ? IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE
            : IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP;
      }
      return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
    default:
      // Only the three problems above are produced for postal-code fields.
      assert(false);
      return "";
  }
}
}
}
#include <libaddressinput/localization.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "grit.h"
#include "messages.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressField;
using i18n::addressinput::INVALID_MESSAGE_ID;
using i18n::addressinput::Localization;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::DEPENDENT_LOCALITY;
using i18n::addressinput::SORTING_CODE;
using i18n::addressinput::POSTAL_CODE;
using i18n::addressinput::STREET_ADDRESS;
using i18n::addressinput::ORGANIZATION;
using i18n::addressinput::RECIPIENT;
using i18n::addressinput::MISSING_REQUIRED_FIELD;
using i18n::addressinput::UNKNOWN_VALUE;
using i18n::addressinput::INVALID_FORMAT;
using i18n::addressinput::MISMATCHING_VALUE;
using i18n::addressinput::USES_P_O_BOX;
// Fixture for message-catalog checks; the test parameter is a message id.
class LocalizationTest : public testing::TestWithParam<int> {
 public:
  LocalizationTest(const LocalizationTest&) = delete;
  LocalizationTest& operator=(const LocalizationTest&) = delete;
 protected:
  LocalizationTest() = default;
  // Uses the default (English) string getter unless a test installs another.
  Localization localization_;
};
// Canned translation returned by GetValidMessage for every id.
const char kValidMessage[] = "Data";
std::string GetValidMessage(int message_id) { return kValidMessage; }
// A custom getter installed via SetGetter() is used verbatim.
TEST_P(LocalizationTest, ValidStringGetterCanBeUsed) {
  localization_.SetGetter(&GetValidMessage);
  EXPECT_EQ(kValidMessage, localization_.GetString(GetParam()));
}
// Every known message id has a non-empty default (English) string.
TEST_P(LocalizationTest, DefaultStringIsNotEmpty) {
  EXPECT_FALSE(localization_.GetString(GetParam()).empty());
}
// Messages are single-line.
TEST_P(LocalizationTest, NoNewline) {
  EXPECT_EQ(std::string::npos, localization_.GetString(GetParam()).find('\n'));
}
// Messages never contain two consecutive spaces.
TEST_P(LocalizationTest, NoDoubleSpace) {
  EXPECT_EQ(std::string::npos,
            localization_.GetString(GetParam()).find(std::string(2U, ' ')));
}
// Runs the catalog checks above for every known message id.
INSTANTIATE_TEST_SUITE_P(
    AllMessages, LocalizationTest,
    testing::Values(
        IDS_LIBADDRESSINPUT_COUNTRY_OR_REGION_LABEL,
        IDS_LIBADDRESSINPUT_LOCALITY_LABEL,
        IDS_LIBADDRESSINPUT_ADDRESS_LINE_1_LABEL,
        IDS_LIBADDRESSINPUT_PIN_CODE_LABEL,
        IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL,
        IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL, IDS_LIBADDRESSINPUT_AREA,
        IDS_LIBADDRESSINPUT_COUNTY, IDS_LIBADDRESSINPUT_DEPARTMENT,
        IDS_LIBADDRESSINPUT_DISTRICT, IDS_LIBADDRESSINPUT_DO_SI,
        IDS_LIBADDRESSINPUT_EMIRATE, IDS_LIBADDRESSINPUT_ISLAND,
        IDS_LIBADDRESSINPUT_PARISH, IDS_LIBADDRESSINPUT_PREFECTURE,
        IDS_LIBADDRESSINPUT_PROVINCE, IDS_LIBADDRESSINPUT_STATE,
        IDS_LIBADDRESSINPUT_ORGANIZATION_LABEL,
        IDS_LIBADDRESSINPUT_RECIPIENT_LABEL,
        IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD,
        IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE_AND_URL,
        IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE,
        IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE_AND_URL,
        IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE,
        IDS_LIBADDRESSINPUT_UNKNOWN_VALUE,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE_AND_URL,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE_AND_URL,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE,
        IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP,
        IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE_URL,
        IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE,
        IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP_URL,
        IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP,
        IDS_LIBADDRESSINPUT_PO_BOX_FORBIDDEN_VALUE));
// Unknown ids map to the empty string rather than crashing.
TEST_F(LocalizationTest, InvalidMessageIsEmptyString) {
  EXPECT_TRUE(localization_.GetString(INVALID_MESSAGE_ID).empty());
}
// POSTAL_CODE "missing required field" message for a region (CH) that labels
// the field "postal code", across all four enable_examples/enable_links
// combinations.
// NOTE(review): the expected-URL literal was truncated at "http:" in the
// checked-in text (everything after "//" stripped), leaving an unterminated
// string; restored the Swiss Post lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, MissingRequiredPostalCode) {
  Localization localization;
  const AddressData address{.region_code = "CH"};
  EXPECT_EQ("You must provide a postal code, for example 2544."
            " Don't know your postal code? Find it out"
            " <a href=\"http://www.post.ch/db/owa/pv_plz_pack/pr_main\">"
            "here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, true, true));
  EXPECT_EQ("You must provide a postal code, for example 2544.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, true, false));
  EXPECT_EQ("You can't leave this empty.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, false, false));
  EXPECT_EQ("You can't leave this empty.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, false, true));
}
// POSTAL_CODE "missing required field" message for a region (US) that labels
// the field "ZIP code", across all four enable_examples/enable_links
// combinations.
// NOTE(review): the expected-URL literal was truncated at "https:" in the
// checked-in text, leaving an unterminated string; restored the USPS ZIP
// lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, MissingRequiredZipCode) {
  Localization localization;
  const AddressData address{.region_code = "US"};
  EXPECT_EQ("You must provide a ZIP code, for example 95014."
            " Don't know your ZIP code? Find it out"
            " <a href=\"https://tools.usps.com/go/ZipLookupAction!"
            "input.action\">here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, true, true));
  EXPECT_EQ("You must provide a ZIP code, for example 95014.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, true, false));
  EXPECT_EQ("You can't leave this empty.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, false, false));
  EXPECT_EQ("You can't leave this empty.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISSING_REQUIRED_FIELD, false, true));
}
// Non-postal-code fields all share the generic "can't leave this empty"
// message, independent of the examples/links flags.
TEST(LocalizationGetErrorMessageTest, MissingRequiredOtherFields) {
  Localization localization;
  const AddressData address{.region_code = "US"};
  const std::vector<AddressField> other_fields{
      COUNTRY,
      ADMIN_AREA,
      LOCALITY,
      DEPENDENT_LOCALITY,
      SORTING_CODE,
      STREET_ADDRESS,
      ORGANIZATION,
      RECIPIENT,
  };
  for (AddressField field : other_fields) {
    EXPECT_EQ("You can't leave this empty.",
              localization.GetErrorMessage(
                  address, field, MISSING_REQUIRED_FIELD, true, true));
    EXPECT_EQ("You can't leave this empty.",
              localization.GetErrorMessage(
                  address, field, MISSING_REQUIRED_FIELD, true, false));
    EXPECT_EQ("You can't leave this empty.",
              localization.GetErrorMessage(
                  address, field, MISSING_REQUIRED_FIELD, false, false));
    EXPECT_EQ("You can't leave this empty.",
              localization.GetErrorMessage(
                  address, field, MISSING_REQUIRED_FIELD, false, true));
  }
}
// For every non-POSTAL_CODE field, an unrecognized value yields
// "<value> is not recognized as a known value for this field." for all
// combinations of the enable_examples/enable_links flags. The value shown is
// the field's content; for the repeated street-address field it is the first
// line. Same expectations as the hand-unrolled original, expressed as a
// table.
TEST(LocalizationGetErrorMessageTest, UnknownValueOtherFields) {
  Localization localization;
  const AddressData address{
      .region_code = "US",
      .address_line{
          "bad address line 1",
          "bad address line 2",
      },
      .administrative_area = "bad admin area",
      .locality = "bad locality",
      .dependent_locality = "bad dependent locality",
      .sorting_code = "bad sorting code",
      .organization = "bad organization",
      .recipient = "bad recipient",
  };
  struct FieldCase {
    AddressField field;
    const char* bad_value;
  };
  const FieldCase kCases[] = {
      {COUNTRY, "US"},
      {ADMIN_AREA, "bad admin area"},
      {LOCALITY, "bad locality"},
      {DEPENDENT_LOCALITY, "bad dependent locality"},
      {SORTING_CODE, "bad sorting code"},
      {STREET_ADDRESS, "bad address line 1"},
      {ORGANIZATION, "bad organization"},
      {RECIPIENT, "bad recipient"},
  };
  // Same flag order as the original test: (examples, links).
  const bool kFlags[][2] = {
      {true, true}, {true, false}, {false, false}, {false, true}};
  for (const FieldCase& field_case : kCases) {
    const std::string expected =
        std::string(field_case.bad_value) +
        " is not recognized as a known value for this field.";
    for (const auto& flags : kFlags) {
      EXPECT_EQ(expected,
                localization.GetErrorMessage(address, field_case.field,
                                             UNKNOWN_VALUE, flags[0],
                                             flags[1]));
    }
  }
}
// POSTAL_CODE "invalid format" message for CH ("postal code" label), across
// all four enable_examples/enable_links combinations.
// NOTE(review): the expected-URL literal was truncated at "http:" in the
// checked-in text, leaving an unterminated string; restored the Swiss Post
// lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, InvalidFormatPostalCode) {
  Localization localization;
  const AddressData address{.region_code = "CH"};
  EXPECT_EQ("This postal code format is not recognized. Example "
            "of a valid postal code: 2544."
            " Don't know your postal code? Find it out"
            " <a href=\"http://www.post.ch/db/owa/pv_plz_pack/pr_main\">"
            "here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, true, true));
  EXPECT_EQ("This postal code format is not recognized. Example "
            "of a valid postal code: 2544.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, true, false));
  EXPECT_EQ("This postal code format is not recognized.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, false, false));
  EXPECT_EQ("This postal code format is not recognized.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, false, true));
}
// POSTAL_CODE "invalid format" message for US ("ZIP code" label), across all
// four enable_examples/enable_links combinations.
// NOTE(review): the expected-URL literal was truncated at "https:" in the
// checked-in text, leaving an unterminated string; restored the USPS ZIP
// lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, InvalidFormatZipCode) {
  Localization localization;
  const AddressData address{.region_code = "US"};
  EXPECT_EQ("This ZIP code format is not recognized. Example of "
            "a valid ZIP code: 95014."
            " Don't know your ZIP code? Find it out"
            " <a href=\"https://tools.usps.com/go/ZipLookupAction!"
            "input.action\">here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, true, true));
  EXPECT_EQ("This ZIP code format is not recognized. Example of "
            "a valid ZIP code: 95014.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, true, false));
  EXPECT_EQ("This ZIP code format is not recognized.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, false, false));
  EXPECT_EQ("This ZIP code format is not recognized.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         INVALID_FORMAT, false, true));
}
// POSTAL_CODE "mismatching value" message for CH. The link appears whenever
// enable_links is true; the examples flag has no effect for this problem.
// NOTE(review): both expected-URL literals were truncated at "http:" in the
// checked-in text, leaving unterminated strings; restored the Swiss Post
// lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, MismatchingValuePostalCode) {
  Localization localization;
  const AddressData address{.region_code = "CH"};
  EXPECT_EQ("This postal code does not appear to match the rest "
            "of this address."
            " Don't know your postal code? Find it out"
            " <a href=\"http://www.post.ch/db/owa/pv_plz_pack/pr_main\">"
            "here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, true, true));
  EXPECT_EQ("This postal code does not appear to match the rest "
            "of this address.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, true, false));
  EXPECT_EQ("This postal code does not appear to match the rest "
            "of this address.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, false, false));
  EXPECT_EQ("This postal code does not appear to match the rest "
            "of this address."
            " Don't know your postal code? Find it out"
            " <a href=\"http://www.post.ch/db/owa/pv_plz_pack/pr_main\">"
            "here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, false, true));
}
// POSTAL_CODE "mismatching value" message for US. The link appears whenever
// enable_links is true; the examples flag has no effect for this problem.
// NOTE(review): both expected-URL literals were truncated at "https:" in the
// checked-in text, leaving unterminated strings; restored the USPS ZIP
// lookup URL from upstream expectations.
TEST(LocalizationGetErrorMessageTest, MismatchingValueZipCode) {
  Localization localization;
  const AddressData address{.region_code = "US"};
  EXPECT_EQ("This ZIP code does not appear to match the rest of "
            "this address."
            " Don't know your ZIP code? Find it out"
            " <a href=\"https://tools.usps.com/go/ZipLookupAction!"
            "input.action\">here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, true, true));
  EXPECT_EQ("This ZIP code does not appear to match the rest of "
            "this address.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, true, false));
  EXPECT_EQ("This ZIP code does not appear to match the rest of "
            "this address.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, false, false));
  EXPECT_EQ("This ZIP code does not appear to match the rest of "
            "this address."
            " Don't know your ZIP code? Find it out"
            " <a href=\"https://tools.usps.com/go/ZipLookupAction!"
            "input.action\">here</a>.",
            localization.GetErrorMessage(address, POSTAL_CODE,
                                         MISMATCHING_VALUE, false, true));
}
// The P.O.-box warning text is shared by all non-postal-code fields and does
// not depend on the examples/links flags.
TEST(LocalizationGetErrorMessageTest, UsesPOBoxOtherFields) {
  Localization localization;
  const AddressData address{.region_code = "US"};
  const std::vector<AddressField> other_fields{
      COUNTRY,
      ADMIN_AREA,
      LOCALITY,
      DEPENDENT_LOCALITY,
      SORTING_CODE,
      STREET_ADDRESS,
      ORGANIZATION,
      RECIPIENT,
  };
  for (AddressField field : other_fields) {
    EXPECT_EQ("This address line appears to contain a post "
              "office box. Please use a street"
              " or building address.",
              localization.GetErrorMessage(
                  address, field, USES_P_O_BOX, true, true));
    EXPECT_EQ("This address line appears to contain a post "
              "office box. Please use a street"
              " or building address.",
              localization.GetErrorMessage(
                  address, field, USES_P_O_BOX, true, false));
    EXPECT_EQ("This address line appears to contain a post "
              "office box. Please use a street"
              " or building address.",
              localization.GetErrorMessage(
                  address, field, USES_P_O_BOX, false, false));
    EXPECT_EQ("This address line appears to contain a post "
              "office box. Please use a street"
              " or building address.",
              localization.GetErrorMessage(
                  address, field, USES_P_O_BOX, false, true));
  }
}
} |
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
#include <any>
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Emits GLSL that emulates integer quantization in floating point: the input
// is clamped to [quant_min, quant_max], mapped to quantization steps, snapped
// to the nearest step (round-half-up via floor(x + 0.5)), and mapped back to
// the float range. The $...$ tokens are substituted from the shader
// parameters declared below.
class QuantizeAndDequantize : public NodeShader {
 public:
  absl::Status GenerateCode(const GenerationContext& ctx,
                            GeneratedCode* generated_code) const final {
    std::string code = R"(
value_0 = clamp(value_0, vec4($quant_min$), vec4($quant_max$));
value_0 = (value_0 - vec4($quant_min$)) / vec4($quant_scale$);
value_0 = floor(value_0 + vec4(0.5));
value_0 = value_0 * vec4($quant_scale$) + vec4($quant_min$);
)";
    const auto& attr =
        std::any_cast<const QuantizeAndDequantizeAttributes&>(ctx.op_attr);
    *generated_code = {
        // Shader parameters feeding the $quant_*$ substitutions above.
        {{"quant_min", attr.min},
         {"quant_max", attr.max},
         {"quant_scale", attr.scale}},
        {},
        {},
        uint3(),
        uint3(),
        code,
        IOStructure::AUTO,
        IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
};
}
// Factory for the GL shader implementing QUANTIZE_AND_DEQUANTIZE.
std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader() {
  return std::make_unique<QuantizeAndDequantize>();
}
}
}
}
#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// 8-bit fake quantization over a nudged [0, 1] range: each input snaps to
// the nearest of 256 levels (0.25 -> 0.25098) and sub-step noise collapses
// to the nearest level (0.00001 -> 0).
TEST(QuantizeAndDequantizeTest, Dim2Bits8) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 3, 2, 1);
  const int num_bits = 8;
  const int quant_min = 0;
  const int quant_max = (1 << num_bits) - 1;
  QuantizeAndDequantizeAttributes attr;
  // Nudges the requested float range so it is exactly representable with the
  // given number of levels, filling attr.min/max/scale.
  NudgeQuantizationRange( 0.0, 1.0,
                         quant_min, quant_max, &attr.min, &attr.max,
                         &attr.scale);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 2, 1);
  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
                      {input}, {output});
  ASSERT_TRUE(
      model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001}));
  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6),
                        {0.0f, 1.0f, 0.25098f, 0.498039f, 0.443137f, 0.0f}))
      ;
}
// 8-bit fake quantization over a nudged [-0.9, 0.9] range; negatives are
// representable and values snap to the nearest of 256 levels.
TEST(QuantizeAndDequantizeTest, Dim3Bits8_NegativeRange) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 3, 1, 2);
  const int num_bits = 8;
  const int quant_min = 0;
  const int quant_max = (1 << num_bits) - 1;
  QuantizeAndDequantizeAttributes attr;
  NudgeQuantizationRange( -0.9, 0.9,
                         quant_min, quant_max, &attr.min, &attr.max,
                         &attr.scale);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 1, 2);
  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
                      {input}, {output});
  ASSERT_TRUE(
      model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001}));
  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {0.0f, -0.896471f, 0.247059f,
                                          0.501176f, 0.444706f, 0.0f}));
}
// 16-bit fake quantization over [0, 1]: 65536 levels, so outputs are much
// closer to the inputs (0.00001 rounds to one step, 1.5259e-05).
TEST(QuantizeAndDequantizeTest, Dim3Bits16) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 3, 1, 2);
  const int num_bits = 16;
  const int quant_min = 0;
  const int quant_max = (1 << num_bits) - 1;
  QuantizeAndDequantizeAttributes attr;
  NudgeQuantizationRange( 0.0, 1.0,
                         quant_min, quant_max, &attr.min, &attr.max,
                         &attr.scale);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 1, 2);
  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
                      {input}, {output});
  ASSERT_TRUE(
      model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001}));
  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {0.0f, 1.0f, 0.250004f, 0.500008f,
                                          0.44445f, 1.5259e-05f}));
}
// 16-bit fake quantization over a nudged [-0.9, 0.9] range.
TEST(QuantizeAndDequantizeTest, Dim2Bits16_NegativeRange) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 3, 2, 1);
  const int num_bits = 16;
  const int quant_min = 0;
  const int quant_max = (1 << num_bits) - 1;
  QuantizeAndDequantizeAttributes attr;
  NudgeQuantizationRange( -0.9, 0.9,
                         quant_min, quant_max, &attr.min, &attr.max,
                         &attr.scale);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 2, 1);
  SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr},
                      {input}, {output});
  ASSERT_TRUE(
      model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001}));
  ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {0.0f, -0.900014f, 0.249998f,
                                          0.499995f, 0.444431f, 0.0f}));
}
}
}
}
} |
#ifndef AROLLA_EXPR_VISITORS_SUBSTITUTION_H_
#define AROLLA_EXPR_VISITORS_SUBSTITUTION_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
absl::StatusOr<ExprNodePtr> SubstituteByName(
ExprNodePtr expr,
const absl::flat_hash_map<std::string, ExprNodePtr>& subs);
absl::StatusOr<ExprNodePtr> SubstituteLeaves(
ExprNodePtr expr,
const absl::flat_hash_map<std::string, ExprNodePtr>& subs);
absl::StatusOr<ExprNodePtr> SubstitutePlaceholders(
ExprNodePtr expr, const absl::flat_hash_map<std::string, ExprNodePtr>& subs,
bool must_substitute_all = false);
absl::StatusOr<ExprNodePtr> SubstituteByFingerprint(
ExprNodePtr expr,
const absl::flat_hash_map<Fingerprint, ExprNodePtr>& subs);
}
#endif
#include "arolla/expr/visitors/substitution.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
namespace {
// Shared implementation of the Substitute* functions: walks |expr| bottom-up
// and, for every node whose |key_fn| yields a key present in |subs|, replaces
// the node (and thereby its whole subtree) with the mapped expression.
// Non-matching nodes are rebuilt over their (possibly substituted)
// dependencies. Replacement expressions are returned as-is; they are not
// themselves re-scanned for further substitutions.
template <class Key, class KeyFn>
absl::StatusOr<ExprNodePtr> Substitute(
    ExprNodePtr expr, const absl::flat_hash_map<Key, ExprNodePtr>& subs,
    KeyFn key_fn) {
  return PostOrderTraverse(
      expr,
      [&](const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
          -> absl::StatusOr<ExprNodePtr> {
        // key_fn returns an optional key; nullopt means "not substitutable".
        if (auto key = key_fn(node); key.has_value()) {
          if (auto it = subs.find(*key); it != subs.end()) {
            return it->second;
          }
        }
        return WithNewDependencies(node, DereferenceVisitPointers(visits));
      });
}
}
// Replaces named subexpressions: any node carrying a name annotation whose
// name appears in |subs| is swapped for the mapped expression.
absl::StatusOr<ExprNodePtr> SubstituteByName(
    ExprNodePtr expr,
    const absl::flat_hash_map<std::string, ExprNodePtr>& subs) {
  auto name_of = [](const auto& node) -> std::optional<std::string> {
    if (!IsNameAnnotation(node)) {
      return std::nullopt;
    }
    return std::string(ReadNameAnnotation(node));
  };
  return Substitute(expr, subs, name_of);
}
// Replaces leaves: any leaf node whose leaf key appears in |subs| is swapped
// for the mapped expression.
absl::StatusOr<ExprNodePtr> SubstituteLeaves(
    ExprNodePtr expr,
    const absl::flat_hash_map<std::string, ExprNodePtr>& subs) {
  auto leaf_key_of = [](const auto& node) -> std::optional<std::string> {
    return node->is_leaf() ? std::optional<std::string>(node->leaf_key())
                           : std::nullopt;
  };
  return Substitute(expr, subs, leaf_key_of);
}
// Replaces placeholder nodes with the expressions mapped to their keys in
// |subs|. If |must_substitute_all| is true, an unmapped placeholder is an
// InvalidArgumentError; otherwise it is kept as-is.
absl::StatusOr<ExprNodePtr> SubstitutePlaceholders(
    ExprNodePtr expr, const absl::flat_hash_map<std::string, ExprNodePtr>& subs,
    bool must_substitute_all) {
  return PostOrderTraverse(
      expr,
      [&](const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
          -> absl::StatusOr<ExprNodePtr> {
        if (node->is_placeholder()) {
          // One hash lookup instead of the original contains() + at() pair.
          if (auto it = subs.find(node->placeholder_key()); it != subs.end()) {
            return it->second;
          }
          if (must_substitute_all) {
            return absl::InvalidArgumentError(absl::StrFormat(
                "No value was provided for P.%s, but substitution of all "
                "placeholders was requested.",
                node->placeholder_key()));
          }
        }
        return WithNewDependencies(node, DereferenceVisitPointers(visits));
      });
}
// Replaces subexpressions by exact identity: every node whose fingerprint
// appears in |subs| is swapped for the mapped expression.
absl::StatusOr<ExprNodePtr> SubstituteByFingerprint(
    ExprNodePtr expr,
    const absl::flat_hash_map<Fingerprint, ExprNodePtr>& subs) {
  auto fingerprint_of = [](const auto& node) -> std::optional<Fingerprint> {
    return node->fingerprint();
  };
  return Substitute(expr, subs, fingerprint_of);
}
}
#include "arolla/expr/visitors/substitution.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::WithNameAnnotation;
class SubstitutionTest : public ::testing::Test {
void SetUp() override { ASSERT_OK(InitArolla()); }
};
TEST_F(SubstitutionTest, SubsByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "ly"));
ASSERT_OK_AND_ASSIGN(auto z, WithNameAnnotation(Leaf("z"), "lz"));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr, CallOp("math.add", {x, z}));
EXPECT_THAT(SubstituteByName(expr, {{"ly", z}}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
TEST_F(SubstitutionTest, SubstituteLeavesByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "ly"));
EXPECT_THAT(SubstituteByName(x, {{"lx", y}}), IsOkAndHolds(EqualsExpr(y)));
}
TEST_F(SubstitutionTest, SubstitutePlaceholdersByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Placeholder("x"), "px"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Placeholder("y"), "py"));
EXPECT_THAT(SubstituteByName(x, {{"px", y}}), IsOkAndHolds(EqualsExpr(y)));
EXPECT_THAT(SubstituteByName(x, {{"x", y}}), IsOkAndHolds(EqualsExpr(x)));
}
TEST_F(SubstitutionTest, SubstitutePlaceholders) {
auto px = Placeholder("x");
auto py = Placeholder("y");
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(px, "name"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(py, "name"));
EXPECT_THAT(SubstitutePlaceholders(x, {{"x", py}}),
IsOkAndHolds(EqualsExpr(y)));
EXPECT_THAT(SubstitutePlaceholders(x, {{"name", py}}),
IsOkAndHolds(EqualsExpr(x)));
}
// SubstituteLeaves matches on the leaf key and ignores name annotations.
TEST_F(SubstitutionTest, SubstituteLeaves) {
  auto leaf_x = Leaf("x");
  auto leaf_y = Leaf("y");
  ASSERT_OK_AND_ASSIGN(auto annotated_x, WithNameAnnotation(leaf_x, "name"));
  ASSERT_OK_AND_ASSIGN(auto annotated_y, WithNameAnnotation(leaf_y, "name"));
  // Keyed by leaf key: the leaf under the annotation is replaced.
  EXPECT_THAT(SubstituteLeaves(annotated_x, {{"x", leaf_y}}),
              IsOkAndHolds(EqualsExpr(annotated_y)));
  // Keyed by the annotation name: no leaf matches, nothing changes.
  EXPECT_THAT(SubstituteLeaves(annotated_x, {{"name", leaf_y}}),
              IsOkAndHolds(EqualsExpr(annotated_x)));
}
// Fingerprint-based substitution: when both a subtree (x+x) and its inner
// node (x) appear in the map, the enclosing subtree's entry wins, and the
// substituted result (z, x) is not itself substituted again.
TEST_F(SubstitutionTest, SubsByFingerprint) {
  ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
  ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "lx"));
  ASSERT_OK_AND_ASSIGN(auto z, WithNameAnnotation(Leaf("z"), "lz"));
  ASSERT_OK_AND_ASSIGN(auto x_add_expr, CallOp("math.add", {x, x}));
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x_add_expr, y}));
  // x -> y, (x+x) -> z, y -> x; note that x and y share the same name "lx",
  // so only fingerprints can distinguish them here.
  absl::flat_hash_map<Fingerprint, ExprNodePtr> subs = {
      {x->fingerprint(), y},
      {x_add_expr->fingerprint(), z},
      {y->fingerprint(), x}};
  ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr, CallOp("math.add", {z, x}));
  EXPECT_THAT(SubstituteByFingerprint(expr, subs),
              IsOkAndHolds(EqualsExpr(expected_expr)));
}
}
} |
#ifndef AROLLA_QEXPR_OPERATOR_ERRORS_H_
#define AROLLA_QEXPR_OPERATOR_ERRORS_H_
#include <string>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
// Returns a NotFound error stating that `operator_name` has no overload for
// `input_types`; `extra_message`, when non-empty, is appended after ": ".
absl::Status OperatorNotDefinedError(absl::string_view operator_name,
                                     absl::Span<const QTypePtr> input_types,
                                     absl::string_view extra_message = "");
// Returns a FailedPrecondition error unless each slot's QType matches the
// corresponding expected type (count and element-wise).
absl::Status VerifyInputSlotTypes(absl::Span<const TypedSlot> slots,
                                  absl::Span<const QTypePtr> expected_types,
                                  absl::string_view operator_name);
// Single-slot variant of the check above, reported as "output" types.
absl::Status VerifyOutputSlotType(TypedSlot slot, QTypePtr expected_type,
                                  absl::string_view operator_name);
// Same checks for already-materialized values.
absl::Status VerifyInputValueTypes(absl::Span<const TypedValue> values,
                                   absl::Span<const QTypePtr> expected_types,
                                   absl::string_view operator_name);
// Overload for non-owning references.
absl::Status VerifyInputValueTypes(absl::Span<const TypedRef> values,
                                   absl::Span<const QTypePtr> expected_types,
                                   absl::string_view operator_name);
// Single-value variant, reported as "output" types.
absl::Status VerifyOutputValueType(const TypedValue& value,
                                   QTypePtr expected_type,
                                   absl::string_view operator_name);
// Guesses the BUILD library target that defines operators in the namespace
// of `operator_name`.
std::string GuessLibraryName(absl::string_view operator_name);
// Guesses the BUILD target for one specific operator implementation.
std::string GuessOperatorLibraryName(absl::string_view operator_name);
// Human-readable hint about the build dependency that usually provides
// missing operator implementations.
std::string SuggestMissingDependency();
// Formats the list of supported signatures for an error message.
std::string SuggestAvailableOverloads(
    absl::string_view operator_name,
    absl::Span<const QExprOperatorSignature* const> supported_qtypes);
}
#endif
#include "arolla/qexpr/operator_errors.h"
#include <cstddef>
#include <initializer_list>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
// Builds the FailedPrecondition status reported whenever the expected and
// actual QType lists of an operator's slots/values disagree.
absl::Status SlotTypesMismatchError(absl::string_view operator_name,
                                    absl::string_view slots_kind,
                                    absl::Span<const QTypePtr> expected_types,
                                    absl::Span<const QTypePtr> got_types) {
  const std::string message = absl::StrFormat(
      "incorrect %s types for operator %s: expected %s, got %s", slots_kind,
      operator_name, FormatTypeVector(expected_types),
      FormatTypeVector(got_types));
  return absl::FailedPreconditionError(message);
}
// Collects the QType of every object in `objects` (works for TypedSlot,
// TypedValue and TypedRef, which all expose GetType()).
template <typename T>
std::vector<QTypePtr> GetQTypes(absl::Span<const T> objects) {
  std::vector<QTypePtr> qtypes;
  qtypes.reserve(objects.size());
  for (const auto& object : objects) {
    qtypes.push_back(object.GetType());
  }
  return qtypes;
}
// Verifies that `objects` matches `expected_types` both in count and
// element-wise; on any discrepancy returns a single mismatch error built
// from the full actual type list.
template <typename T>
absl::Status VerifyTypes(absl::Span<const T> objects,
                         absl::Span<const QTypePtr> expected_types,
                         absl::string_view operator_name,
                         absl::string_view slots_kind) {
  bool matches = objects.size() == expected_types.size();
  for (size_t i = 0; matches && i < objects.size(); ++i) {
    matches = objects[i].GetType() == expected_types[i];
  }
  if (!matches) {
    return SlotTypesMismatchError(operator_name, slots_kind, expected_types,
                                  GetQTypes(objects));
  }
  return absl::OkStatus();
}
}
// Builds the NotFound status returned when no operator overload matches the
// given argument types; `extra_message` is appended after ": " if present.
absl::Status OperatorNotDefinedError(absl::string_view operator_name,
                                     absl::Span<const QTypePtr> input_types,
                                     absl::string_view extra_message) {
  std::string message =
      absl::StrCat("operator ", operator_name,
                   " is not defined for argument types ",
                   FormatTypeVector(input_types));
  if (!extra_message.empty()) {
    absl::StrAppend(&message, ": ", extra_message);
  }
  return absl::NotFoundError(message);
}
// Thin public wrappers around the VerifyTypes<> template; they only fix the
// object type and whether the report says "input" or "output".
absl::Status VerifyInputSlotTypes(absl::Span<const TypedSlot> slots,
                                  absl::Span<const QTypePtr> expected_types,
                                  absl::string_view operator_name) {
  return VerifyTypes(slots, expected_types, operator_name, "input");
}
absl::Status VerifyOutputSlotType(TypedSlot slot, QTypePtr expected_type,
                                  absl::string_view operator_name) {
  // Explicit template argument: the braced single-element lists would not
  // deduce the Span element type on their own.
  return VerifyTypes<TypedSlot>({slot}, {expected_type}, operator_name,
                                "output");
}
absl::Status VerifyInputValueTypes(absl::Span<const TypedValue> values,
                                   absl::Span<const QTypePtr> expected_types,
                                   absl::string_view operator_name) {
  return VerifyTypes(values, expected_types, operator_name, "input");
}
absl::Status VerifyInputValueTypes(absl::Span<const TypedRef> values,
                                   absl::Span<const QTypePtr> expected_types,
                                   absl::string_view operator_name) {
  return VerifyTypes(values, expected_types, operator_name, "input");
}
absl::Status VerifyOutputValueType(const TypedValue& value,
                                   QTypePtr expected_type,
                                   absl::string_view operator_name) {
  return VerifyTypes<TypedValue>({value}, {expected_type}, operator_name,
                                 "output");
}
// Guesses the BUILD target containing the operator's namespace, e.g.
// "math.complex.add" -> "//arolla/qexpr/operators/math/complex".
//
// Fix: the return statement had a truncated, unbalanced string literal
// (`absl::StrCat("`); the "//"-prefixed target path was lost. Restored the
// conventional operators target prefix.
std::string GuessLibraryName(absl::string_view operator_name) {
  // Drop the final ".<op>" segment and map namespace dots to path slashes.
  std::string path = absl::StrReplaceAll(
      operator_name.substr(0, operator_name.rfind('.')), {{".", "/"}});
  return absl::StrCat("//arolla/qexpr/operators/", path);
}
// Guesses the BUILD target for one specific operator: the namespace library
// plus an ":operator_<lowercased basename>" suffix.
std::string GuessOperatorLibraryName(absl::string_view operator_name) {
  const absl::string_view basename =
      operator_name.substr(operator_name.rfind('.') + 1);
  return absl::StrFormat("%s:operator_%s", GuessLibraryName(operator_name),
                         absl::AsciiStrToLower(basename));
}
// Returns a hint about the catch-all build dependency that typically
// provides missing operator implementations.
//
// Fix: the literal was truncated mid-string (the "@arolla//..." target was
// cut after "@arolla:"), leaving an unterminated string constant. Restored
// the full target label.
std::string SuggestMissingDependency() {
  return "adding \"@arolla//arolla/qexpr/operators/all\" "
         "build dependency may help";
}
// Formats every supported signature as "op(inputs) -> output", one per
// line, for inclusion in "operator not defined" error messages.
std::string SuggestAvailableOverloads(
    absl::string_view operator_name,
    absl::Span<const QExprOperatorSignature* const> supported_qtypes) {
  std::vector<std::string> overload_lines;
  overload_lines.reserve(supported_qtypes.size());
  for (const auto signature : supported_qtypes) {
    overload_lines.push_back(absl::StrFormat(
        "%s(%s) -> %s", operator_name, JoinTypeNames(signature->input_types()),
        signature->output_type()->name()));
  }
  return absl::StrFormat("available overloads:\n %s",
                         absl::StrJoin(overload_lines, ",\n "));
}
}
#include "arolla/qexpr/operator_errors.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOk;
using ::arolla::testing::StatusIs;
using ::testing::Eq;
// The NotFound message lists the operator and argument types; the optional
// extra message is appended after ": ".
TEST(OperatorErrorsTest, OperatorNotDefinedError) {
  absl::string_view op_name = "test.Not";
  EXPECT_THAT(
      OperatorNotDefinedError(op_name, {GetQType<int>(), GetQType<float>()}),
      StatusIs(absl::StatusCode::kNotFound,
               "operator test.Not is not defined for argument types "
               "(INT32,FLOAT32)"));
  EXPECT_THAT(OperatorNotDefinedError(op_name, {GetQType<int>()}, "Oops"),
              StatusIs(absl::StatusCode::kNotFound,
                       "operator test.Not is not defined for argument types "
                       "(INT32): Oops"));
}
// VerifyInputSlotTypes succeeds on an exact match and reports both the
// expected and actual type vectors on mismatch.
TEST(OperatorErrorsTest, VerifySlotTypes) {
  absl::string_view op_name = "test.Not";
  FrameLayout::Builder builder;
  auto int_slot = builder.AddSlot<int>();
  auto double_slot = builder.AddSlot<double>();
  EXPECT_THAT(
      VerifyInputSlotTypes(ToTypedSlots(int_slot, double_slot),
                           {GetQType<int>(), GetQType<double>()}, op_name),
      IsOk());
  EXPECT_THAT(
      VerifyInputSlotTypes(ToTypedSlots(int_slot, double_slot),
                           {GetQType<int>(), GetQType<float>()}, op_name),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               "incorrect input types for operator test.Not: expected "
               "(INT32,FLOAT32), got (INT32,FLOAT64)"));
}
// Same checks as VerifySlotTypes, but for materialized TypedValues.
TEST(OperatorErrorsTest, VerifyValueTypes) {
  absl::string_view op_name = "test.Not";
  auto int_value = TypedValue::FromValue(57);
  auto double_value = TypedValue::FromValue(5.7);
  EXPECT_THAT(
      VerifyInputValueTypes({int_value, double_value},
                            {GetQType<int>(), GetQType<double>()}, op_name),
      IsOk());
  EXPECT_THAT(
      VerifyInputValueTypes({int_value, double_value},
                            {GetQType<int>(), GetQType<float>()}, op_name),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               "incorrect input types for operator test.Not: expected "
               "(INT32,FLOAT32), got (INT32,FLOAT64)"));
}
// Fix: the Eq("...") expectations were truncated mid-literal (the "//..."
// target labels were stripped), leaving unterminated strings. Restored
// expectations consistent with GuessLibraryName's operators target prefix.
TEST(OperatorErrorsTest, GuessLibraryName) {
  EXPECT_THAT(GuessLibraryName("math.add"),
              Eq("//arolla/qexpr/operators/math"));
  EXPECT_THAT(GuessLibraryName("math.complex.add"),
              Eq("//arolla/qexpr/operators/math/complex"));
}
// Fix: truncated Eq("...") literals restored; the operator target is the
// namespace library plus ":operator_<basename>".
TEST(OperatorErrorsTest, GuessOperatorLibraryName) {
  EXPECT_THAT(GuessOperatorLibraryName("math.add"),
              Eq("//arolla/qexpr/operators/math:operator_add"));
  EXPECT_THAT(
      GuessOperatorLibraryName("math.complex.add"),
      Eq("//arolla/qexpr/operators/math/complex:operator_add"));
}
}
} |
#ifndef TENSORFLOW_CORE_TFRT_UTILS_DEBUG_NODE_IO_DUMP_REWRITER_H_
#define TENSORFLOW_CORE_TFRT_UTILS_DEBUG_NODE_IO_DUMP_REWRITER_H_
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace tfrt_stub {
// Rewrites `graph` so the inputs and outputs of every node whose name is in
// `nodes_to_dump` are dumped via debug ops. If `dump_dir` is empty, the
// TF_DUMP_GRAPH_PREFIX environment variable is used instead.
Status InsertDumpOps(Graph& graph,
                     const absl::flat_hash_set<std::string>& nodes_to_dump,
                     absl::string_view dump_dir = "");
// Same as above, operating on the GraphDef embedded in `meta_graph_def`.
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
                     const absl::flat_hash_set<std::string>& nodes_to_dump,
                     absl::string_view dump_dir = "");
}
}
#endif
#include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
// Resolves the dump directory: an explicit argument wins, then the
// TF_DUMP_GRAPH_PREFIX environment variable; otherwise InvalidArgument.
absl::StatusOr<std::string> GetDumpDir(absl::string_view dump_dir) {
  if (!dump_dir.empty()) {
    return std::string(dump_dir);
  }
  if (const char* prefix = getenv("TF_DUMP_GRAPH_PREFIX"); prefix != nullptr) {
    return std::string(prefix);
  }
  return errors::InvalidArgument("TF_DUMP_GRAPH_PREFIX not specified");
}
// Wraps every non-control input and output edge of `node` with a
// DebugIdentityV3 op whose dumps are written under `dump_dir`.
//
// Fixes: (1) the debug_urls literal was truncated at `"file:` — restored
// the standard tfdbg "file://" URL scheme; (2) the lambda took the edge
// vector by value, forcing an extra copy — now by const reference.
Status InsertDumpOpsForNode(Graph& graph, Node& node,
                            absl::string_view dump_dir) {
  // Inserts one dump op per data edge; `is_input` records whether the edges
  // are inputs or outputs of `node` so dumps can be attributed back to it.
  auto insert = [&](bool is_input, const std::vector<const Edge*>& edges) {
    for (const Edge* edge : edges) {
      if (edge->IsControlEdge()) continue;
      Node* dump_node;
      TF_RETURN_IF_ERROR(
          NodeBuilder(absl::StrCat(edge->src()->name(), "/", edge->src_output(),
                                   "/debug_identity"),
                      "DebugIdentityV3")
              .Attr("io_of_node", node.name())
              .Attr("is_input", is_input)
              .Attr("io_index",
                    is_input ? edge->dst_input() : edge->src_output())
              .Attr("tensor_name",
                    absl::StrCat(edge->src()->name(), ":", edge->src_output()))
              .Attr("debug_urls", {absl::StrCat("file://", dump_dir)})
              .Input(edge->src(), edge->src_output())
              .Finalize(&graph, &dump_node));
      // Reroute the original consumer to read from the dump op instead.
      TF_RETURN_IF_ERROR(
          graph.UpdateEdge(dump_node, 0, edge->dst(), edge->dst_input()));
    }
    return absl::OkStatus();
  };
  // Copy the edge sets before mutating: inserting nodes/edges would
  // invalidate direct iteration over node.in_edges()/out_edges().
  TF_RETURN_IF_ERROR(insert(true,
                            {node.in_edges().begin(), node.in_edges().end()}));
  TF_RETURN_IF_ERROR(insert(
      false, {node.out_edges().begin(), node.out_edges().end()}));
  return absl::OkStatus();
}
}
// Instruments matching nodes in the main graph and in every function of the
// graph's function library.
Status InsertDumpOps(Graph& graph,
                     const absl::flat_hash_set<std::string>& nodes_to_dump,
                     absl::string_view dump_dir) {
  TF_ASSIGN_OR_RETURN(auto dir, GetDumpDir(dump_dir));
  // Instruments all listed op nodes of one graph in place.
  auto insert = [&](Graph& graph) {
    for (Node* node : graph.op_nodes()) {
      if (nodes_to_dump.contains(node->name())) {
        TF_RETURN_IF_ERROR(InsertDumpOpsForNode(graph, *node, dir));
      }
    }
    return absl::OkStatus();
  };
  TF_RETURN_IF_ERROR(insert(graph));
  // NOTE: iterates over a snapshot of function names while replacing
  // functions in the same library below.
  for (const auto& fname : graph.flib_def().ListFunctionNames()) {
    // Materialize the function as a Graph, instrument it, then write the
    // rewritten FunctionDef back into the library.
    std::unique_ptr<FunctionBody> fbody;
    TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
        *graph.flib_def().Find(fname), AttrSlice(), &graph.flib_def(), &fbody));
    TF_RETURN_IF_ERROR(insert(*fbody->graph));
    FunctionDef new_fdef;
    TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, fname, &new_fdef));
    TF_RETURN_IF_ERROR(
        graph.mutable_flib_def()->ReplaceFunction(fname, new_fdef));
  }
  return absl::OkStatus();
}
// MetaGraphDef overload: round-trips through a Graph so the Graph-based
// overload above does the actual rewriting, then writes the result back.
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
                     const absl::flat_hash_set<std::string>& nodes_to_dump,
                     absl::string_view dump_dir) {
  Graph graph(OpRegistry::Global());
  TF_RETURN_IF_ERROR(
      ConvertGraphDefToGraph({}, meta_graph_def.graph_def(), &graph));
  TF_RETURN_IF_ERROR(InsertDumpOps(graph, nodes_to_dump, dump_dir));
  graph.ToGraphDef(meta_graph_def.mutable_graph_def());
  return absl::OkStatus();
}
}
}
#include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <dirent.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/tfrt/saved_model/saved_model.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
// Returns the node with the given name, or nullptr if no such node exists.
const Node* FindNode(const Graph* graph, absl::string_view node_name) {
  for (Node* candidate : graph->nodes()) {
    if (candidate->name() == node_name) {
      return candidate;
    }
  }
  return nullptr;
}
// Returns the producer of `node`'s `index`-th input; CHECK-fails on error.
const Node* GetInputNode(const Node* node, size_t index) {
  const Node* producer = nullptr;
  CHECK_OK(node->input_node(index, &producer));
  return producer;
}
// Returns the first consumer connected to `node`'s output slot `index`, or
// nullptr if that slot has no outgoing edge.
const Node* GetOutputNode(const Node* node, size_t index) {
  for (const Edge* out_edge : node->out_edges()) {
    if (out_edge->src_output() == index) {
      return out_edge->dst();
    }
  }
  return nullptr;
}
// Lists the dump files produced under `dump_dir`: expects exactly one
// per-step directory under "<dump_dir>/node-io-dump" and returns the names
// of the plain files inside it.
absl::StatusOr<std::vector<std::string>> GetFilenames(
    absl::string_view dump_dir) {
  auto dump_sub_dir = absl::StrCat(dump_dir, "/", kDumpSubDirName);
  DIR* dir = opendir(dump_sub_dir.data());
  if (dir == nullptr) {
    return absl::InvalidArgumentError(
        absl::StrCat("can't open directory: ", dump_sub_dir));
  }
  // First pass: collect the step directories (must all be directories).
  std::vector<std::string> step_dirs;
  struct dirent* entry;
  while ((entry = readdir(dir)) != nullptr) {
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;
    }
    if (entry->d_type != DT_DIR) {
      return absl::InternalError(absl::StrCat(
          "Found non-directory entry under dump_sub_dir: ", entry->d_name));
    }
    step_dirs.push_back(absl::StrCat(dump_sub_dir, "/", entry->d_name));
  }
  closedir(dir);
  // The tests run a single step, so exactly one step directory is expected.
  CHECK_EQ(step_dirs.size(), 1);
  // Second pass: collect the dump file names within that step directory.
  dir = opendir(step_dirs[0].data());
  if (dir == nullptr) {
    return absl::InvalidArgumentError(
        absl::StrCat("can't open directory: ", step_dirs[0]));
  }
  std::vector<std::string> filenames;
  while ((entry = readdir(dir)) != nullptr) {
    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
      continue;
    }
    if (entry->d_type == DT_DIR) {
      return absl::InternalError(absl::StrCat(
          "Found directory entry under step_dir: ", entry->d_name));
    }
    filenames.push_back(entry->d_name);
  }
  closedir(dir);
  return filenames;
}
// After InsertDumpOps, the "add" node's inputs and output are routed
// through freshly inserted "<src>/<slot>/debug_identity" nodes.
TEST(NodeIoDumpRewriterTest, OnGraph) {
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  Scope scope = Scope::NewRootScope().WithDevice("/device:CPU:0");
  auto input_a = ops::Placeholder(scope.WithOpName("input_a"), DT_INT32);
  auto input_b = ops::Placeholder(scope.WithOpName("input_b"), DT_INT32);
  auto add = ops::Add(scope.WithOpName("add"), input_a, input_b);
  auto output = ops::Identity(scope.WithOpName("output"), add);
  TF_ASSERT_OK(scope.ToGraph(graph.get()));
  Env* env = Env::Default();
  const string dump_dir =
      ::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnGraph");
  if (!env->FileExists(dump_dir).ok()) {
    ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
  }
  TF_ASSERT_OK(InsertDumpOps(*graph, {"add"}, dump_dir));
  auto* node = FindNode(graph.get(), "add");
  EXPECT_EQ(node->num_inputs(), 2);
  EXPECT_EQ(GetInputNode(node, 0)->name(), "input_a/0/debug_identity");
  EXPECT_EQ(GetInputNode(node, 1)->name(), "input_b/0/debug_identity");
  EXPECT_EQ(node->num_outputs(), 1);
  EXPECT_EQ(GetOutputNode(node, 0)->name(), "add/0/debug_identity");
}
// End-to-end: instrument a v1 saved model, run it, and verify both the
// outputs and the dump files written for the "Add" node (1 output, 2 inputs).
TEST(NodeIoDumpRewriterTest, OnSavedModelV1) {
  std::string saved_model_dir = GetDataDependencyFilepath(
      "tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
  MetaGraphDef meta_graph_def;
  TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
                                              &meta_graph_def));
  Env* env = Env::Default();
  const string dump_dir =
      ::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV1");
  if (!env->FileExists(dump_dir).ok()) {
    ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
  }
  TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"Add"}, dump_dir));
  auto runtime = DefaultTfrtRuntime(1);
  SavedModel::Options options(runtime.get());
  // Grappler could rewrite away the instrumented nodes; keep it off.
  options.graph_execution_options.compile_options.enable_grappler = false;
  TF_ASSERT_OK_AND_ASSIGN(
      auto saved_model,
      SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
  std::vector<tensorflow::Tensor> inputs;
  inputs.push_back(
      CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
  std::vector<tensorflow::Tensor> outputs;
  TF_ASSERT_OK(saved_model->Run({}, "another_toy", inputs, &outputs));
  ASSERT_EQ(outputs.size(), 2);
  EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
              ::testing::ElementsAreArray({6}));
  EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]),
              ::testing::ElementsAreArray({12}));
  ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
  ASSERT_EQ(filenames.size(), 3);
  EXPECT_TRUE(absl::StartsWith(filenames[0], "Add:out:0_"));
  EXPECT_TRUE(absl::StartsWith(filenames[1], "Add:in:0_"));
  EXPECT_TRUE(absl::StartsWith(filenames[2], "Add:in:1_"));
}
// Same end-to-end flow as OnSavedModelV1, against a v2 saved model and its
// "result" node.
TEST(NodeIoDumpRewriterTest, OnSavedModelV2) {
  std::string saved_model_dir = GetDataDependencyFilepath(
      "tensorflow/core/tfrt/saved_model/tests/toy_v2");
  MetaGraphDef meta_graph_def;
  TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
                                              &meta_graph_def));
  Env* env = Env::Default();
  const string dump_dir =
      ::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV2");
  if (!env->FileExists(dump_dir).ok()) {
    ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
  }
  TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"result"}, dump_dir));
  auto runtime = DefaultTfrtRuntime(1);
  SavedModel::Options options(runtime.get());
  // Grappler could rewrite away the instrumented nodes; keep it off.
  options.graph_execution_options.compile_options.enable_grappler = false;
  TF_ASSERT_OK_AND_ASSIGN(
      auto saved_model,
      SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
  std::vector<tensorflow::Tensor> inputs;
  inputs.push_back(
      CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
  std::vector<tensorflow::Tensor> outputs;
  TF_ASSERT_OK(saved_model->Run({}, "serving_default", inputs, &outputs));
  ASSERT_EQ(outputs.size(), 1);
  EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
              ::testing::ElementsAreArray({6}));
  ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
  ASSERT_EQ(filenames.size(), 3);
  EXPECT_TRUE(absl::StartsWith(filenames[0], "result:out:0_"));
  EXPECT_TRUE(absl::StartsWith(filenames[1], "result:in:1_"));
  EXPECT_TRUE(absl::StartsWith(filenames[2], "result:in:0_"));
}
}
}
} |
#ifndef TENSORFLOW_DTENSOR_MLIR_SPMD_EXPANDER_H_
#define TENSORFLOW_DTENSOR_MLIR_SPMD_EXPANDER_H_
#include <memory>
#include <string>
#include "absl/types/optional.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/UseDefLists.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tensorflow/dtensor/mlir/spmd_expander_common.h"
namespace tensorflow {
namespace dtensor {
// Base class for per-op DTensor SPMD expanders: each subclass rewrites one
// op kind from global-tensor form into per-device local-tensor form and
// participates in layout propagation in both directions.
class SPMDExpanderBase {
 public:
  virtual ~SPMDExpanderBase() = default;
  // Rewrites `op` into its SPMD-expanded form; returns the expanded op.
  virtual StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) = 0;
  // Computes output layouts (keyed by result index) from input layouts.
  virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts);
  // Overload that also sees already-known output layouts; the default
  // implementation handles single-device meshes and parted propagation.
  virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
      const llvm::DenseMap<int, Layout>& output_layouts);
  // Computes input layouts (keyed by operand index) from output layouts.
  virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts);
  // Overload that also sees already-known input layouts.
  virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
      const llvm::DenseMap<int, Layout>& output_layouts);
  // Runs ExpandOp and re-attaches the computed layouts to the result,
  // verifying global shape consistency.
  Status ExpandOpAndSetLayout(mlir::Operation* op, mlir::Operation** output);
};
Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output);
// Process-wide registry mapping full op names (e.g. "tf.Add") to their SPMD
// expanders; populated at startup via the REGISTER_SPMD* macros.
class SPMDExpanderRegistry {
 public:
  ~SPMDExpanderRegistry() = default;
  // Returns the singleton registry instance.
  static SPMDExpanderRegistry* Global();
  // True when an expander is (or can be lazily) registered for the op.
  bool IsOpSupported(const std::string& full_op_name) {
    return GetPropagateFnForFullOpName(full_op_name) != nullptr;
  }
  // Looks up the expander for `op`, or nullptr when none is registered.
  SPMDExpanderBase* GetPropagateFnForOp(mlir::Operation* op);
  SPMDExpanderBase* GetPropagateFnForFullOpName(
      const std::string& full_op_name);
  // Registers `prop` for `opName`; returns a marker usable in static
  // initializers. CHECK-fails on duplicate registration.
  InitOnStartupMarker RegisterPropagateFn(
      std::string opName, std::unique_ptr<SPMDExpanderBase> prop);
 private:
  // NOTE(review): absl::flat_hash_map is used here but the visible include
  // list does not show absl/container/flat_hash_map.h — confirm it is
  // pulled in transitively.
  absl::flat_hash_map<std::string, std::unique_ptr<SPMDExpanderBase>>
      op_to_propagate_fn_map_;
};
#define REGISTER_SPMD(name, op, prop, ...) \
static ::tensorflow::InitOnStartupMarker const spmd_##name = \
InitOnStartupMarker{} \
<< dtensor::SPMDExpanderRegistry::Global()->RegisterPropagateFn( \
mlir::op::getOperationName().str(), \
std::make_unique<prop>(__VA_ARGS__))
#define REGISTER_SPMD_BY_OP_NAME(expander_name, op_name, prop, ...) \
static ::tensorflow::InitOnStartupMarker const spmd_##expander_name = \
InitOnStartupMarker{} \
<< dtensor::SPMDExpanderRegistry::Global()->RegisterPropagateFn( \
op_name, std::make_unique<prop>(__VA_ARGS__))
}
}
#endif
#include "tensorflow/dtensor/mlir/spmd_expander.h"
#include <climits>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/dtensor/cc/constants.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/dtensor_utils.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tensorflow/dtensor/mlir/collectives.h"
#include "tensorflow/dtensor/mlir/expansions/replicated_spmd_expander.h"
#include "tensorflow/dtensor/mlir/ir/tf_dtensor.h"
#include "tensorflow/dtensor/mlir/layout_parsing.h"
#include "tensorflow/dtensor/mlir/op_utils.h"
#include "tensorflow/dtensor/mlir/shape_utils.h"
#include "tensorflow/dtensor/mlir/spmd_expander_common.h"
#include "tensorflow/dtensor/proto/layout.pb.h"
namespace tensorflow {
namespace dtensor {
namespace {
// If any input carries a parted layout, converts every computed layout to
// its parted counterpart; otherwise leaves `computed_layouts` untouched.
Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts,
                          llvm::DenseMap<int, Layout>* computed_layouts) {
  bool has_parted_input = false;
  for (const auto& input_entry : input_layouts) {
    if (input_entry.second.type() == Layout::LayoutType::kParted) {
      has_parted_input = true;
      break;
    }
  }
  if (!has_parted_input) return absl::OkStatus();
  // Parted-ness propagates through the op to all computed layouts.
  for (auto& computed_entry : *computed_layouts) {
    TF_ASSIGN_OR_RETURN(Layout parted_layout, computed_entry.second.ToParted());
    computed_entry.getSecond() = parted_layout;
  }
  return absl::OkStatus();
}
// Returns true when SPMD expansion should be skipped because some operand
// carries a parted layout. Return ops are never skipped.
bool SkipExpansionForPartedLayout(mlir::Operation* op) {
  if (llvm::isa<mlir::func::ReturnOp, mlir::tf_device::ReturnOp>(op)) {
    return false;
  }
  auto input_layouts_or = ExtractRequiredLayoutFromOperands(op);
  if (!input_layouts_or.ok()) {
    return false;
  }
  for (const auto& operand_layout : input_layouts_or.value()) {
    if (operand_layout.type() == Layout::LayoutType::kParted) {
      return true;
    }
  }
  return false;
}
}
// Returns the process-wide registry. Intentionally leaked: constructed on
// first use and never destroyed.
SPMDExpanderRegistry* SPMDExpanderRegistry::Global() {
  static SPMDExpanderRegistry* const kInstance = new SPMDExpanderRegistry();
  return kInstance;
}
// Returns the expander registered for `full_op_name`. When none exists and
// the replicated-default feature is enabled for that op, lazily registers a
// ReplicatedOpSPMDExpander and returns it; otherwise returns nullptr.
SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForFullOpName(
    const std::string& full_op_name) {
  auto key = full_op_name;
  auto fn = op_to_propagate_fn_map_.find(key);
  if (fn == op_to_propagate_fn_map_.end()) {
    if (EnableReplicatedSpmdAsDefault(key)) {
      LOG(WARNING)
          << full_op_name << " is defaulting to ReplicatedOpSPMDExpander. This "
          << " has performance implications as all inputs and outputs "
          << " will be replicated if they are not already. Please file a "
          << " feature request to TF DTensor to implement an efficient "
          << " SPMD for this operation.";
      // `true`: presumably forces relayout to replicated — confirm against
      // the ReplicatedOpSPMDExpander constructor.
      RegisterPropagateFn(key, std::make_unique<ReplicatedOpSPMDExpander>(
                                   true));
      // Re-lookup after registration so the owned pointer is returned.
      return op_to_propagate_fn_map_.find(key)->second.get();
    } else {
      return nullptr;
    }
  }
  return fn->second.get();
}
// Convenience overload: resolves the op's full name and delegates to the
// name-based lookup above.
SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForOp(
    mlir::Operation* op) {
  return GetPropagateFnForFullOpName(OpName(op));
}
// Registers `prop` as the expander for `opName`. CHECK-fails if an expander
// was already registered under that name.
InitOnStartupMarker SPMDExpanderRegistry::RegisterPropagateFn(
    std::string opName, std::unique_ptr<SPMDExpanderBase> prop) {
  const bool newly_inserted =
      op_to_propagate_fn_map_.insert_or_assign(opName, std::move(prop)).second;
  CHECK(newly_inserted);
  return {};
}
// Expands `op` into its local (per-device) form and re-attaches the layouts
// extracted from the original op, then sanity-checks that the expanded
// result is still consistent with the global shapes implied by the layouts.
Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op,
                                              mlir::Operation** output) {
  TF_ASSIGN_OR_RETURN(std::vector<std::optional<Layout>> computed_layout,
                      ExtractLayoutFromOp(op));
  // Ops with results must have been assigned layouts by layout propagation.
  if (computed_layout.empty() && op->getNumResults() != 0) {
    return errors::InvalidArgument(
        absl::StrCat("No attached layout found for op : ", OpName(op),
                     " This might be due to an error in layout propagation.")
            .c_str());
  }
  TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
  bool skip_expansion_for_parted_layout = SkipExpansionForPartedLayout(op);
  // Single-device meshes, XLA-SPMD meshes, and parted-layout operands skip
  // the expansion itself; only local shape inference / layout re-attachment
  // is performed.
  if (mesh.IsSingleDevice() || mesh.use_xla_spmd() ||
      skip_expansion_for_parted_layout) {
    if (skip_expansion_for_parted_layout) {
      *output = InferSPMDExpandedLocalShape(op);
    } else {
      *output = op;
    }
    SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>(
                               computed_layout.data(), computed_layout.size()));
    return absl::OkStatus();
  }
  // Capture the global output shapes before expansion mutates the op.
  llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> global_output_shapes;
  global_output_shapes.reserve(op->getNumResults());
  for (auto output_value : op->getResults()) {
    auto maybe_ranked =
        mlir::dyn_cast<mlir::RankedTensorType>(output_value.getType());
    // Restore ops may legitimately have unknown static shapes; skip them.
    if (llvm::isa<mlir::TF::RestoreV2Op, mlir::TF::DTensorRestoreV2Op>(op) &&
        (!maybe_ranked || !maybe_ranked.hasStaticShape()))
      continue;
    TF_ASSIGN_OR_RETURN(auto global_shape,
                        ExtractGlobalOutputShape(output_value));
    global_output_shapes.emplace_back(llvm::SmallVector<int64_t, 4>{
        global_shape.begin(), global_shape.end()});
  }
  TF_ASSIGN_OR_RETURN(*output, this->ExpandOp(op));
  SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>(
                             computed_layout.data(), computed_layout.size()));
  // Verify: local shape + layout must reproduce the recorded global shape.
  for (const auto& output_layout_and_index :
       llvm::enumerate(llvm::zip((*output)->getResults(), computed_layout))) {
    const int index = output_layout_and_index.index();
    const auto& output_and_layout = output_layout_and_index.value();
    auto output_value = std::get<0>(output_and_layout);
    auto local_expanded_shape_or_status = GetShapeOfValue(output_value);
    if (!local_expanded_shape_or_status.ok()) continue;
    const auto local_expanded_shape = local_expanded_shape_or_status.value();
    const auto& layout = std::get<1>(output_and_layout);
    const auto expected_global_shape =
        layout->GlobalShapeFromLocalShape(local_expanded_shape);
    for (const auto& expanded_and_true_global_shape :
         llvm::zip(global_output_shapes[index], expected_global_shape)) {
      const auto expanded_shape = std::get<0>(expanded_and_true_global_shape);
      const auto expected_shape = std::get<1>(expanded_and_true_global_shape);
      // Dynamic (non-positive) dimensions cannot be checked.
      if (expanded_shape <= 0 || expected_shape <= 0) continue;
      if (expanded_shape != expected_shape) {
        return errors::Internal(
            "SPMD expansion resulted in op output inconsistent with the "
            "provided layout. Expected shape: <",
            absl::StrJoin(expected_global_shape, ","), "> got shape: <",
            absl::StrJoin(global_output_shapes[index], ","), ">");
      }
    }
  }
  return absl::OkStatus();
}
// Default forward layout propagation: subclasses must override the
// input-only variant; the two-argument variant handles single-device meshes
// and applies parted-layout propagation around the subclass implementation.
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
    mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) {
  return errors::Unimplemented(
      "ComputeLayoutForward API must be implemented via the subclass.");
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward(
    mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
    const llvm::DenseMap<int, Layout>& output_layouts) {
  TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
  // On a single-device mesh every result gets the single-device layout.
  if (mesh.IsSingleDevice()) {
    TF_ASSIGN_OR_RETURN(
        Layout layout,
        Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh));
    auto layouts = llvm::DenseMap<int, Layout>{};
    for (int i = 0; i < op->getNumResults(); ++i) {
      layouts.insert({i, layout});
    }
    return layouts;
  }
  TF_ASSIGN_OR_RETURN(auto layouts, ComputeLayoutForward(op, input_layouts));
  // Parted inputs force the computed output layouts to be parted too.
  TF_RETURN_IF_ERROR(AdjustPartedLayout(input_layouts, &layouts));
  return layouts;
}
// Default backward layout propagation: mirrors ComputeLayoutForward, keyed
// by operand index instead of result index.
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward(
    mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) {
  return errors::Unimplemented(
      "ComputeLayoutBackward API must be implemented via the subclass.");
}
StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward(
    mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts,
    const llvm::DenseMap<int, Layout>& output_layouts) {
  TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op));
  // On a single-device mesh every operand gets the single-device layout.
  if (mesh.IsSingleDevice()) {
    TF_ASSIGN_OR_RETURN(
        Layout layout,
        Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh));
    auto layouts = llvm::DenseMap<int, Layout>{};
    for (int i = 0; i < op->getNumOperands(); ++i) {
      layouts.insert({i, layout});
    }
    return layouts;
  }
  return ComputeLayoutBackward(op, output_layouts);
}
// Expands `op` via its registered expander; ops without a registered
// expander pass through unchanged.
Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) {
  SPMDExpanderBase* const expander =
      SPMDExpanderRegistry::Global()->GetPropagateFnForOp(op);
  if (expander == nullptr) {
    VLOG(1) << "No expansion found for " << OpName(op) << "\n";
    *output = op;
    return absl::OkStatus();
  }
  return expander->ExpandOpAndSetLayout(op, output);
}
}
}
#include "tensorflow/dtensor/mlir/spmd_expander.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/dtensor/cc/dstatus.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
namespace tensorflow {
namespace dtensor {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
// Minimal SPMDExpanderBase subclass used only to exercise registry lookup;
// every override returns Unimplemented and is never expected to run.
class DummyExpander : public SPMDExpanderBase {
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override {
    return errors::Unimplemented("");
  }
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override {
    return errors::Unimplemented("");
  }
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override {
    return errors::Unimplemented("");
  }
};
// Fixture that pre-registers a DummyExpander for tf.Add so lookups can be
// tested against a known-populated registry.
class SPMDExpanderRegistryTest : public ::testing::Test {
 public:
  SPMDExpanderRegistryTest() {
    registry_.RegisterPropagateFn(mlir::TF::AddOp::getOperationName().str(),
                                  std::make_unique<DummyExpander>());
  }

 protected:
  SPMDExpanderRegistry registry_;
};
// A registered full op name resolves to its expander; unknown names yield
// nullptr rather than an error.
TEST_F(SPMDExpanderRegistryTest, LookupFromOpName) {
  EXPECT_THAT(registry_.GetPropagateFnForFullOpName("tf.Add"), NotNull());
  EXPECT_THAT(registry_.GetPropagateFnForFullOpName("Unknown"), IsNull());
}
}
}
} |
24 | #ifndef QUICHE_QUIC_CORE_UBER_RECEIVED_PACKET_MANAGER_H_
#define QUICHE_QUIC_CORE_UBER_RECEIVED_PACKET_MANAGER_H_
#include "quiche/quic/core/frames/quic_ack_frequency_frame.h"
#include "quiche/quic/core/quic_received_packet_manager.h"
namespace quic {
// Aggregates one QuicReceivedPacketManager per packet number space. Until
// EnableMultiplePacketNumberSpacesSupport() is called, every packet is
// tracked by the manager at index 0; afterwards each encryption level is
// routed to the manager owning its packet number space.
class QUICHE_EXPORT UberReceivedPacketManager {
 public:
  explicit UberReceivedPacketManager(QuicConnectionStats* stats);
  UberReceivedPacketManager(const UberReceivedPacketManager&) = delete;
  UberReceivedPacketManager& operator=(const UberReceivedPacketManager&) =
      delete;
  virtual ~UberReceivedPacketManager();
  // Applies negotiated config to all per-space managers.
  void SetFromConfig(const QuicConfig& config, Perspective perspective);
  // True if `packet_number` has not yet been received in the packet number
  // space implied by `decrypted_packet_level`.
  bool IsAwaitingPacket(EncryptionLevel decrypted_packet_level,
                        QuicPacketNumber packet_number) const;
  // Records receipt of a decrypted packet so it can be acknowledged.
  void RecordPacketReceived(EncryptionLevel decrypted_packet_level,
                            const QuicPacketHeader& header,
                            QuicTime receipt_time,
                            QuicEcnCodepoint ecn_codepoint);
  // Builds the ack frame to send for the given space; `approximate_now` is
  // used to compute the ack delay.
  const QuicFrame GetUpdatedAckFrame(PacketNumberSpace packet_number_space,
                                     QuicTime approximate_now);
  // Stops tracking (and acking gaps before) packets below `least_unacked`.
  void DontWaitForPacketsBefore(EncryptionLevel decrypted_packet_level,
                                QuicPacketNumber least_unacked);
  // Possibly (re)arms the ack alarm after a packet is processed.
  void MaybeUpdateAckTimeout(bool should_last_packet_instigate_acks,
                             EncryptionLevel decrypted_packet_level,
                             QuicPacketNumber last_received_packet_number,
                             QuicTime last_packet_receipt_time, QuicTime now,
                             const RttStats* rtt_stats);
  // Clears pending-ack state for the space of `encryption_level`.
  void ResetAckStates(EncryptionLevel encryption_level);
  // One-shot switch to per-space tracking; must precede any received packet.
  void EnableMultiplePacketNumberSpacesSupport(Perspective perspective);
  // True if any space has new information to acknowledge.
  bool IsAckFrameUpdated() const;
  QuicPacketNumber GetLargestObserved(
      EncryptionLevel decrypted_packet_level) const;
  QuicTime GetAckTimeout(PacketNumberSpace packet_number_space) const;
  // Minimum of the initialized ack timeouts across all spaces.
  QuicTime GetEarliestAckTimeout() const;
  bool IsAckFrameEmpty(PacketNumberSpace packet_number_space) const;
  size_t min_received_before_ack_decimation() const;
  void set_min_received_before_ack_decimation(size_t new_value);
  void set_ack_frequency(size_t new_value);
  bool supports_multiple_packet_number_spaces() const {
    return supports_multiple_packet_number_spaces_;
  }
  // Valid only before multiple packet number spaces are enabled.
  const QuicAckFrame& ack_frame() const;
  // Valid only after multiple packet number spaces are enabled.
  const QuicAckFrame& GetAckFrame(PacketNumberSpace packet_number_space) const;
  void set_max_ack_ranges(size_t max_ack_ranges);
  void OnAckFrequencyFrame(const QuicAckFrequencyFrame& frame);
  void set_save_timestamps(bool save_timestamps);

 private:
  friend class test::QuicConnectionPeer;
  friend class test::UberReceivedPacketManagerPeer;
  // Index 0 is used for everything until multiple spaces are enabled.
  QuicReceivedPacketManager received_packet_managers_[NUM_PACKET_NUMBER_SPACES];
  bool supports_multiple_packet_number_spaces_;
};
}
#endif
#include "quiche/quic/core/uber_received_packet_manager.h"
#include <algorithm>
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
// Wires the shared connection stats into every per-space manager; multiple
// packet number space support starts out disabled.
UberReceivedPacketManager::UberReceivedPacketManager(QuicConnectionStats* stats)
    : supports_multiple_packet_number_spaces_(false) {
  for (QuicReceivedPacketManager& manager : received_packet_managers_) {
    manager.set_connection_stats(stats);
  }
}
UberReceivedPacketManager::~UberReceivedPacketManager() = default;
void UberReceivedPacketManager::SetFromConfig(const QuicConfig& config,
Perspective perspective) {
for (auto& received_packet_manager : received_packet_managers_) {
received_packet_manager.SetFromConfig(config, perspective);
}
}
bool UberReceivedPacketManager::IsAwaitingPacket(
    EncryptionLevel decrypted_packet_level,
    QuicPacketNumber packet_number) const {
  // Before multiple spaces are enabled everything lives in manager 0;
  // afterwards route by the encryption level's packet number space.
  const QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[QuicUtils::GetPacketNumberSpace(
                decrypted_packet_level)]
          : received_packet_managers_[0];
  return manager.IsAwaitingPacket(packet_number);
}
const QuicFrame UberReceivedPacketManager::GetUpdatedAckFrame(
    PacketNumberSpace packet_number_space, QuicTime approximate_now) {
  // `packet_number_space` is only meaningful once multiple spaces exist.
  QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[packet_number_space]
          : received_packet_managers_[0];
  return manager.GetUpdatedAckFrame(approximate_now);
}
void UberReceivedPacketManager::RecordPacketReceived(
    EncryptionLevel decrypted_packet_level, const QuicPacketHeader& header,
    QuicTime receipt_time, QuicEcnCodepoint ecn_codepoint) {
  // Pick the manager owning this packet's number space (manager 0 when
  // multiple spaces are disabled) and record the receipt there.
  QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[QuicUtils::GetPacketNumberSpace(
                decrypted_packet_level)]
          : received_packet_managers_[0];
  manager.RecordPacketReceived(header, receipt_time, ecn_codepoint);
}
void UberReceivedPacketManager::DontWaitForPacketsBefore(
    EncryptionLevel decrypted_packet_level, QuicPacketNumber least_unacked) {
  // Forward the cutoff to the manager for this encryption level's space.
  QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[QuicUtils::GetPacketNumberSpace(
                decrypted_packet_level)]
          : received_packet_managers_[0];
  manager.DontWaitForPacketsBefore(least_unacked);
}
void UberReceivedPacketManager::MaybeUpdateAckTimeout(
bool should_last_packet_instigate_acks,
EncryptionLevel decrypted_packet_level,
QuicPacketNumber last_received_packet_number,
QuicTime last_packet_receipt_time, QuicTime now,
const RttStats* rtt_stats) {
if (!supports_multiple_packet_number_spaces_) {
received_packet_managers_[0].MaybeUpdateAckTimeout(
should_last_packet_instigate_acks, last_received_packet_number,
last_packet_receipt_time, now, rtt_stats);
return;
}
received_packet_managers_[QuicUtils::GetPacketNumberSpace(
decrypted_packet_level)]
.MaybeUpdateAckTimeout(should_last_packet_instigate_acks,
last_received_packet_number,
last_packet_receipt_time, now, rtt_stats);
}
void UberReceivedPacketManager::ResetAckStates(
    EncryptionLevel encryption_level) {
  if (!supports_multiple_packet_number_spaces_) {
    received_packet_managers_[0].ResetAckStates();
    return;
  }
  const PacketNumberSpace space =
      QuicUtils::GetPacketNumberSpace(encryption_level);
  received_packet_managers_[space].ResetAckStates();
  if (encryption_level == ENCRYPTION_INITIAL) {
    // Initial packets should be acked promptly, so restore the tight ack
    // delay after the state reset.
    received_packet_managers_[INITIAL_DATA].set_local_max_ack_delay(
        kAlarmGranularity);
  }
}
// Switches from single-manager tracking to one manager per packet number
// space. May only be called once, and only before any packet is received.
void UberReceivedPacketManager::EnableMultiplePacketNumberSpacesSupport(
    Perspective perspective) {
  if (supports_multiple_packet_number_spaces_) {
    QUIC_BUG(quic_bug_10495_1)
        << "Multiple packet number spaces has already been enabled";
    return;
  }
  // Manager 0 holds all pre-switch state; any observed packet means the
  // switch would lose or misattribute tracking data.
  if (received_packet_managers_[0].GetLargestObserved().IsInitialized()) {
    QUIC_BUG(quic_bug_10495_2)
        << "Try to enable multiple packet number spaces support after any "
           "packet has been received.";
    return;
  }
  // Handshake-phase packets are acked with minimal delay. Clients also do
  // so for Initial packets.
  if (perspective == Perspective::IS_CLIENT) {
    received_packet_managers_[INITIAL_DATA].set_local_max_ack_delay(
        kAlarmGranularity);
  }
  received_packet_managers_[HANDSHAKE_DATA].set_local_max_ack_delay(
      kAlarmGranularity);
  supports_multiple_packet_number_spaces_ = true;
}
bool UberReceivedPacketManager::IsAckFrameUpdated() const {
if (!supports_multiple_packet_number_spaces_) {
return received_packet_managers_[0].ack_frame_updated();
}
for (const auto& received_packet_manager : received_packet_managers_) {
if (received_packet_manager.ack_frame_updated()) {
return true;
}
}
return false;
}
QuicPacketNumber UberReceivedPacketManager::GetLargestObserved(
    EncryptionLevel decrypted_packet_level) const {
  // Choose manager 0 or the per-space manager, then read its largest
  // observed packet number.
  const QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[QuicUtils::GetPacketNumberSpace(
                decrypted_packet_level)]
          : received_packet_managers_[0];
  return manager.GetLargestObserved();
}
QuicTime UberReceivedPacketManager::GetAckTimeout(
    PacketNumberSpace packet_number_space) const {
  const QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[packet_number_space]
          : received_packet_managers_[0];
  return manager.ack_timeout();
}
QuicTime UberReceivedPacketManager::GetEarliestAckTimeout() const {
  // Scan all spaces and keep the smallest initialized timeout; an
  // uninitialized QuicTime (Zero) means that space has no ack pending.
  QuicTime earliest = QuicTime::Zero();
  for (const QuicReceivedPacketManager& manager : received_packet_managers_) {
    const QuicTime candidate = manager.ack_timeout();
    if (!candidate.IsInitialized()) {
      continue;
    }
    if (!earliest.IsInitialized() || candidate < earliest) {
      earliest = candidate;
    }
  }
  return earliest;
}
bool UberReceivedPacketManager::IsAckFrameEmpty(
    PacketNumberSpace packet_number_space) const {
  const QuicReceivedPacketManager& manager =
      supports_multiple_packet_number_spaces_
          ? received_packet_managers_[packet_number_space]
          : received_packet_managers_[0];
  return manager.IsAckFrameEmpty();
}
// Reads the threshold from manager 0; the setter keeps all per-space
// managers in sync, so any one of them reflects the current value.
size_t UberReceivedPacketManager::min_received_before_ack_decimation() const {
  return received_packet_managers_[0].min_received_before_ack_decimation();
}
void UberReceivedPacketManager::set_min_received_before_ack_decimation(
size_t new_value) {
for (auto& received_packet_manager : received_packet_managers_) {
received_packet_manager.set_min_received_before_ack_decimation(new_value);
}
}
void UberReceivedPacketManager::set_ack_frequency(size_t new_value) {
for (auto& received_packet_manager : received_packet_managers_) {
received_packet_manager.set_ack_frequency(new_value);
}
}
// Single-space accessor: only meaningful before multiple packet number
// spaces are enabled (enforced by the DCHECK).
const QuicAckFrame& UberReceivedPacketManager::ack_frame() const {
  QUICHE_DCHECK(!supports_multiple_packet_number_spaces_);
  return received_packet_managers_[0].ack_frame();
}
// Per-space accessor: only meaningful after multiple packet number spaces
// are enabled (enforced by the DCHECK).
const QuicAckFrame& UberReceivedPacketManager::GetAckFrame(
    PacketNumberSpace packet_number_space) const {
  QUICHE_DCHECK(supports_multiple_packet_number_spaces_);
  return received_packet_managers_[packet_number_space].ack_frame();
}
void UberReceivedPacketManager::set_max_ack_ranges(size_t max_ack_ranges) {
for (auto& received_packet_manager : received_packet_managers_) {
received_packet_manager.set_max_ack_ranges(max_ack_ranges);
}
}
void UberReceivedPacketManager::set_save_timestamps(bool save_timestamps) {
for (auto& received_packet_manager : received_packet_managers_) {
received_packet_manager.set_save_timestamps(
save_timestamps, supports_multiple_packet_number_spaces_);
}
}
// Forwards an ACK_FREQUENCY frame to the application-data space's manager.
// The frame is invalid unless multiple packet number spaces are in use.
void UberReceivedPacketManager::OnAckFrequencyFrame(
    const QuicAckFrequencyFrame& frame) {
  if (!supports_multiple_packet_number_spaces_) {
    QUIC_BUG(quic_bug_10495_3)
        << "Received AckFrequencyFrame when multiple packet number spaces "
           "is not supported";
    return;
  }
  received_packet_managers_[APPLICATION_DATA].OnAckFrequencyFrame(frame);
}
} | #include "quiche/quic/core/uber_received_packet_manager.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
// Test-only backdoor into UberReceivedPacketManager's private per-space
// managers (access granted via friendship in the class declaration).
class UberReceivedPacketManagerPeer {
 public:
  // Overrides the ack-decimation delay fraction on every space's manager.
  static void SetAckDecimationDelay(UberReceivedPacketManager* manager,
                                    float ack_decimation_delay) {
    for (auto& received_packet_manager : manager->received_packet_managers_) {
      received_packet_manager.ack_decimation_delay_ = ack_decimation_delay;
    }
  }
};
namespace {
// Shared test constants: whether a packet should instigate an ack, the
// simulated minimum RTT, and the default delayed-ack interval.
const bool kInstigateAck = true;
const QuicTime::Delta kMinRttMs = QuicTime::Delta::FromMilliseconds(40);
const QuicTime::Delta kDelayedAckTime =
    QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
// Maps each packet number space to the encryption level whose packets it
// carries; unknown spaces are a programming error.
EncryptionLevel GetEncryptionLevel(PacketNumberSpace packet_number_space) {
  if (packet_number_space == INITIAL_DATA) {
    return ENCRYPTION_INITIAL;
  }
  if (packet_number_space == HANDSHAKE_DATA) {
    return ENCRYPTION_HANDSHAKE;
  }
  if (packet_number_space == APPLICATION_DATA) {
    return ENCRYPTION_FORWARD_SECURE;
  }
  QUICHE_DCHECK(false);
  return NUM_ENCRYPTION_LEVELS;
}
// Fixture driving UberReceivedPacketManager with a mock clock and fixed RTT.
// Helpers default to the forward-secure (application-data) level.
class UberReceivedPacketManagerTest : public QuicTest {
 protected:
  UberReceivedPacketManagerTest() {
    manager_ = std::make_unique<UberReceivedPacketManager>(&stats_);
    // Start the clock away from Zero so "now" is an initialized QuicTime.
    clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
    rtt_stats_.UpdateRtt(kMinRttMs, QuicTime::Delta::Zero(), QuicTime::Zero());
    manager_->set_save_timestamps(true);
  }
  // Records receipt at the forward-secure level with receipt time Zero.
  void RecordPacketReceipt(uint64_t packet_number) {
    RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, packet_number);
  }
  void RecordPacketReceipt(uint64_t packet_number, QuicTime receipt_time) {
    RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, packet_number, receipt_time);
  }
  void RecordPacketReceipt(EncryptionLevel decrypted_packet_level,
                           uint64_t packet_number) {
    RecordPacketReceipt(decrypted_packet_level, packet_number,
                        QuicTime::Zero());
  }
  // Base helper: builds a header for `packet_number` and records it.
  void RecordPacketReceipt(EncryptionLevel decrypted_packet_level,
                           uint64_t packet_number, QuicTime receipt_time) {
    QuicPacketHeader header;
    header.packet_number = QuicPacketNumber(packet_number);
    manager_->RecordPacketReceived(decrypted_packet_level, header, receipt_time,
                                   ECN_NOT_ECT);
  }
  // True if any ack alarm is armed (any space when multi-space is enabled).
  bool HasPendingAck() {
    if (!manager_->supports_multiple_packet_number_spaces()) {
      return manager_->GetAckTimeout(APPLICATION_DATA).IsInitialized();
    }
    return manager_->GetEarliestAckTimeout().IsInitialized();
  }
  void MaybeUpdateAckTimeout(bool should_last_packet_instigate_acks,
                             uint64_t last_received_packet_number) {
    MaybeUpdateAckTimeout(should_last_packet_instigate_acks,
                          ENCRYPTION_FORWARD_SECURE,
                          last_received_packet_number);
  }
  void MaybeUpdateAckTimeout(bool should_last_packet_instigate_acks,
                             EncryptionLevel decrypted_packet_level,
                             uint64_t last_received_packet_number) {
    manager_->MaybeUpdateAckTimeout(
        should_last_packet_instigate_acks, decrypted_packet_level,
        QuicPacketNumber(last_received_packet_number), clock_.ApproximateNow(),
        clock_.ApproximateNow(), &rtt_stats_);
  }
  // Asserts the pending ack timeout equals `time`; if that timeout is due,
  // simulates sending the ack by resetting the relevant ack state.
  void CheckAckTimeout(QuicTime time) {
    QUICHE_DCHECK(HasPendingAck());
    if (!manager_->supports_multiple_packet_number_spaces()) {
      QUICHE_DCHECK(manager_->GetAckTimeout(APPLICATION_DATA) == time);
      if (time <= clock_.ApproximateNow()) {
        manager_->ResetAckStates(ENCRYPTION_FORWARD_SECURE);
        QUICHE_DCHECK(!HasPendingAck());
      }
      return;
    }
    QUICHE_DCHECK(manager_->GetEarliestAckTimeout() == time);
    // Reset every space whose timeout has already fired.
    for (int8_t i = INITIAL_DATA; i < NUM_PACKET_NUMBER_SPACES; ++i) {
      const QuicTime ack_timeout =
          manager_->GetAckTimeout(static_cast<PacketNumberSpace>(i));
      if (!ack_timeout.IsInitialized() ||
          ack_timeout > clock_.ApproximateNow()) {
        continue;
      }
      manager_->ResetAckStates(
          GetEncryptionLevel(static_cast<PacketNumberSpace>(i)));
    }
  }
  MockClock clock_;
  RttStats rtt_stats_;
  QuicConnectionStats stats_;
  std::unique_ptr<UberReceivedPacketManager> manager_;
};
// Packets below the DontWaitForPacketsBefore cutoff stop being awaited while
// later gaps are still tracked.
TEST_F(UberReceivedPacketManagerTest, DontWaitForPacketsBefore) {
  EXPECT_TRUE(manager_->IsAckFrameEmpty(APPLICATION_DATA));
  RecordPacketReceipt(2);
  EXPECT_FALSE(manager_->IsAckFrameEmpty(APPLICATION_DATA));
  RecordPacketReceipt(7);
  EXPECT_TRUE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                         QuicPacketNumber(3u)));
  EXPECT_TRUE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                         QuicPacketNumber(6u)));
  manager_->DontWaitForPacketsBefore(ENCRYPTION_FORWARD_SECURE,
                                     QuicPacketNumber(4));
  EXPECT_FALSE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                          QuicPacketNumber(3u)));
  EXPECT_TRUE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                         QuicPacketNumber(6u)));
}
// Ack frames report ack delay relative to receipt time and keep only the
// most recent timestamps (here capped at two entries).
TEST_F(UberReceivedPacketManagerTest, GetUpdatedAckFrame) {
  QuicTime two_ms = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(2);
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  RecordPacketReceipt(2, two_ms);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  QuicFrame ack =
      manager_->GetUpdatedAckFrame(APPLICATION_DATA, QuicTime::Zero());
  manager_->ResetAckStates(ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  // approximate_now (Zero) precedes receipt time, so delay clamps to zero.
  EXPECT_EQ(QuicTime::Delta::Zero(), ack.ack_frame->ack_delay_time);
  EXPECT_EQ(1u, ack.ack_frame->received_packet_times.size());
  QuicTime four_ms = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(4);
  ack = manager_->GetUpdatedAckFrame(APPLICATION_DATA, four_ms);
  manager_->ResetAckStates(ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  // Delay is now - receipt time = 4ms - 2ms.
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(2),
            ack.ack_frame->ack_delay_time);
  EXPECT_EQ(1u, ack.ack_frame->received_packet_times.size());
  RecordPacketReceipt(999, two_ms);
  RecordPacketReceipt(4, two_ms);
  RecordPacketReceipt(1000, two_ms);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  ack = manager_->GetUpdatedAckFrame(APPLICATION_DATA, two_ms);
  manager_->ResetAckStates(ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  EXPECT_EQ(2u, ack.ack_frame->received_packet_times.size());
}
// Out-of-order receipt (6 then 2) updates the reordering statistics on the
// shared QuicConnectionStats.
TEST_F(UberReceivedPacketManagerTest, UpdateReceivedConnectionStats) {
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  RecordPacketReceipt(1);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  RecordPacketReceipt(6);
  RecordPacketReceipt(2,
                      QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1));
  EXPECT_EQ(4u, stats_.max_sequence_reordering);
  EXPECT_EQ(1000, stats_.max_time_reordering_us);
  EXPECT_EQ(1u, stats_.packets_reordered);
}
// With max_ack_ranges = 10, only the ten most recent alternating ranges are
// kept while the largest received packet is always present.
TEST_F(UberReceivedPacketManagerTest, LimitAckRanges) {
  manager_->set_max_ack_ranges(10);
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  for (int i = 0; i < 100; ++i) {
    RecordPacketReceipt(1 + 2 * i);
    EXPECT_TRUE(manager_->IsAckFrameUpdated());
    manager_->GetUpdatedAckFrame(APPLICATION_DATA, QuicTime::Zero());
    EXPECT_GE(10u, manager_->ack_frame().packets.NumIntervals());
    EXPECT_EQ(QuicPacketNumber(1u + 2 * i),
              manager_->ack_frame().packets.Max());
    for (int j = 0; j < std::min(10, i + 1); ++j) {
      ASSERT_GE(i, j);
      EXPECT_TRUE(manager_->ack_frame().packets.Contains(
          QuicPacketNumber(1 + (i - j) * 2)));
      if (i > j) {
        EXPECT_FALSE(manager_->ack_frame().packets.Contains(
            QuicPacketNumber((i - j) * 2)));
      }
    }
  }
}
// A receipt timestamp earlier than the previous one is not recorded in
// received_packet_times.
TEST_F(UberReceivedPacketManagerTest, IgnoreOutOfOrderTimestamps) {
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  RecordPacketReceipt(1, QuicTime::Zero());
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  EXPECT_EQ(1u, manager_->ack_frame().received_packet_times.size());
  RecordPacketReceipt(2,
                      QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1));
  EXPECT_EQ(2u, manager_->ack_frame().received_packet_times.size());
  RecordPacketReceipt(3, QuicTime::Zero());
  EXPECT_EQ(2u, manager_->ack_frame().received_packet_times.size());
}
// Receiving a packet that fills or precedes a gap triggers an immediate ack;
// an in-order packet only arms the delayed-ack timer.
TEST_F(UberReceivedPacketManagerTest, OutOfOrderReceiptCausesAckSent) {
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(3, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 3);
  CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
  RecordPacketReceipt(2, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 2);
  CheckAckTimeout(clock_.ApproximateNow());
  RecordPacketReceipt(1, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 1);
  CheckAckTimeout(clock_.ApproximateNow());
  RecordPacketReceipt(4, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 4);
  CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
}
// Packets that do not instigate acks (e.g. ack-only packets) never arm the
// ack timer, even when received out of order.
TEST_F(UberReceivedPacketManagerTest, OutOfOrderAckReceiptCausesNoAck) {
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(2, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 2);
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(1, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 1);
  EXPECT_FALSE(HasPendingAck());
}
// Only ack-instigating packets arm the delayed-ack timer; once it fires the
// pending ack is sent and later non-instigating packets stay silent.
TEST_F(UberReceivedPacketManagerTest, AckReceiptCausesAckSend) {
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(1, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 1);
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(2, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 2);
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(3, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 3);
  CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
  clock_.AdvanceTime(kDelayedAckTime);
  CheckAckTimeout(clock_.ApproximateNow());
  RecordPacketReceipt(4, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 4);
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(5, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(!kInstigateAck, 5);
  EXPECT_FALSE(HasPendingAck());
}
// With ack_frequency = 3, every third instigating packet forces an
// immediate ack; others use the delayed-ack timer.
TEST_F(UberReceivedPacketManagerTest, AckSentEveryNthPacket) {
  EXPECT_FALSE(HasPendingAck());
  manager_->set_ack_frequency(3);
  for (size_t i = 1; i <= 39; ++i) {
    RecordPacketReceipt(i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, i);
    if (i % 3 == 0) {
      CheckAckTimeout(clock_.ApproximateNow());
    } else {
      CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
    }
  }
}
// After min_received_before_ack_decimation packets, acks switch from
// every-other-packet to the decimation schedule (min_rtt/4, every 10th).
TEST_F(UberReceivedPacketManagerTest, AckDecimationReducesAcks) {
  EXPECT_FALSE(HasPendingAck());
  manager_->set_min_received_before_ack_decimation(10);
  for (size_t i = 1; i <= 29; ++i) {
    RecordPacketReceipt(i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, i);
    if (i <= 10) {
      // Pre-decimation: ack every other packet.
      if (i % 2 == 0) {
        CheckAckTimeout(clock_.ApproximateNow());
      } else {
        CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
      }
      continue;
    }
    if (i == 20) {
      CheckAckTimeout(clock_.ApproximateNow());
    } else {
      CheckAckTimeout(clock_.ApproximateNow() + kMinRttMs * 0.25);
    }
  }
  RecordPacketReceipt(30, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, 30);
  CheckAckTimeout(clock_.ApproximateNow());
}
// In the decimation regime the ack alarm is min_rtt/4 out, but the tenth
// unacked packet still forces an immediate ack.
TEST_F(UberReceivedPacketManagerTest, SendDelayedAckDecimation) {
  EXPECT_FALSE(HasPendingAck());
  QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.25;
  uint64_t kFirstDecimatedPacket = 101;
  for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
    RecordPacketReceipt(i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, i);
    if (i % 2 == 0) {
      CheckAckTimeout(clock_.ApproximateNow());
    } else {
      CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
    }
  }
  RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
  CheckAckTimeout(ack_time);
  for (uint64_t i = 1; i < 10; ++i) {
    RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
  }
  CheckAckTimeout(clock_.ApproximateNow());
}
// With the kAKDU connection option, decimation aggregation is unlimited:
// even 18 further packets do not force an ack before the timer fires.
TEST_F(UberReceivedPacketManagerTest,
       SendDelayedAckDecimationUnlimitedAggregation) {
  EXPECT_FALSE(HasPendingAck());
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kAKDU);
  config.SetConnectionOptionsToSend(connection_options);
  manager_->SetFromConfig(config, Perspective::IS_CLIENT);
  QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.25;
  uint64_t kFirstDecimatedPacket = 101;
  for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
    RecordPacketReceipt(i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, i);
    if (i % 2 == 0) {
      CheckAckTimeout(clock_.ApproximateNow());
    } else {
      CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
    }
  }
  RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
  CheckAckTimeout(ack_time);
  for (int i = 1; i <= 18; ++i) {
    RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
  }
  CheckAckTimeout(ack_time);
}
// With the decimation delay overridden to 1/8 (via the test peer), the
// decimation alarm is min_rtt/8 instead of min_rtt/4.
TEST_F(UberReceivedPacketManagerTest, SendDelayedAckDecimationEighthRtt) {
  EXPECT_FALSE(HasPendingAck());
  UberReceivedPacketManagerPeer::SetAckDecimationDelay(manager_.get(), 0.125);
  QuicTime ack_time = clock_.ApproximateNow() + kMinRttMs * 0.125;
  uint64_t kFirstDecimatedPacket = 101;
  for (uint64_t i = 1; i < kFirstDecimatedPacket; ++i) {
    RecordPacketReceipt(i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, i);
    if (i % 2 == 0) {
      CheckAckTimeout(clock_.ApproximateNow());
    } else {
      CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
    }
  }
  RecordPacketReceipt(kFirstDecimatedPacket, clock_.ApproximateNow());
  MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket);
  CheckAckTimeout(ack_time);
  for (uint64_t i = 1; i < 10; ++i) {
    RecordPacketReceipt(kFirstDecimatedPacket + i, clock_.ApproximateNow());
    MaybeUpdateAckTimeout(kInstigateAck, kFirstDecimatedPacket + i);
  }
  CheckAckTimeout(clock_.ApproximateNow());
}
// With multiple packet number spaces, tracking and cutoffs are independent
// per space: a forward-secure cutoff does not affect handshake tracking.
TEST_F(UberReceivedPacketManagerTest,
       DontWaitForPacketsBeforeMultiplePacketNumberSpaces) {
  manager_->EnableMultiplePacketNumberSpacesSupport(Perspective::IS_CLIENT);
  EXPECT_FALSE(
      manager_->GetLargestObserved(ENCRYPTION_HANDSHAKE).IsInitialized());
  EXPECT_FALSE(
      manager_->GetLargestObserved(ENCRYPTION_FORWARD_SECURE).IsInitialized());
  RecordPacketReceipt(ENCRYPTION_HANDSHAKE, 2);
  RecordPacketReceipt(ENCRYPTION_HANDSHAKE, 4);
  RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, 3);
  RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, 7);
  EXPECT_EQ(QuicPacketNumber(4),
            manager_->GetLargestObserved(ENCRYPTION_HANDSHAKE));
  EXPECT_EQ(QuicPacketNumber(7),
            manager_->GetLargestObserved(ENCRYPTION_FORWARD_SECURE));
  EXPECT_TRUE(
      manager_->IsAwaitingPacket(ENCRYPTION_HANDSHAKE, QuicPacketNumber(3)));
  EXPECT_FALSE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                          QuicPacketNumber(3)));
  EXPECT_TRUE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                         QuicPacketNumber(4)));
  manager_->DontWaitForPacketsBefore(ENCRYPTION_FORWARD_SECURE,
                                     QuicPacketNumber(5));
  EXPECT_TRUE(
      manager_->IsAwaitingPacket(ENCRYPTION_HANDSHAKE, QuicPacketNumber(3)));
  EXPECT_FALSE(manager_->IsAwaitingPacket(ENCRYPTION_FORWARD_SECURE,
                                          QuicPacketNumber(4)));
}
// Each packet number space uses its own ack-delay policy: Initial acks use
// 25ms then 1ms, handshake 1ms, and application data the default delay.
TEST_F(UberReceivedPacketManagerTest, AckSendingDifferentPacketNumberSpaces) {
  manager_->EnableMultiplePacketNumberSpacesSupport(Perspective::IS_SERVER);
  EXPECT_FALSE(HasPendingAck());
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  RecordPacketReceipt(ENCRYPTION_INITIAL, 3);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_INITIAL, 3);
  EXPECT_TRUE(HasPendingAck());
  CheckAckTimeout(clock_.ApproximateNow() +
                  QuicTime::Delta::FromMilliseconds(25));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(25));
  CheckAckTimeout(clock_.ApproximateNow());
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(ENCRYPTION_INITIAL, 4);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_INITIAL, 4);
  EXPECT_TRUE(HasPendingAck());
  CheckAckTimeout(clock_.ApproximateNow() +
                  QuicTime::Delta::FromMilliseconds(1));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  CheckAckTimeout(clock_.ApproximateNow());
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(ENCRYPTION_HANDSHAKE, 3);
  EXPECT_TRUE(manager_->IsAckFrameUpdated());
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_HANDSHAKE, 3);
  EXPECT_TRUE(HasPendingAck());
  CheckAckTimeout(clock_.ApproximateNow() +
                  QuicTime::Delta::FromMilliseconds(1));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  CheckAckTimeout(clock_.ApproximateNow());
  EXPECT_FALSE(HasPendingAck());
  RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, 3);
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_FORWARD_SECURE, 3);
  EXPECT_TRUE(HasPendingAck());
  CheckAckTimeout(clock_.ApproximateNow() + kDelayedAckTime);
  RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, 2);
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_FORWARD_SECURE, 2);
  CheckAckTimeout(clock_.ApproximateNow());
  EXPECT_FALSE(HasPendingAck());
}
// A packet decrypted late (its receipt time predates "now") keeps an ack
// timeout anchored to its original receipt time, not the decryption time.
TEST_F(UberReceivedPacketManagerTest,
       AckTimeoutForPreviouslyUndecryptablePackets) {
  manager_->EnableMultiplePacketNumberSpacesSupport(Perspective::IS_SERVER);
  EXPECT_FALSE(HasPendingAck());
  EXPECT_FALSE(manager_->IsAckFrameUpdated());
  const QuicTime packet_receipt_time4 = clock_.ApproximateNow();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  RecordPacketReceipt(ENCRYPTION_HANDSHAKE, 5);
  MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_HANDSHAKE, 5);
  EXPECT_TRUE(HasPendingAck());
  RecordPacketReceipt(ENCRYPTION_FORWARD_SECURE, 4);
  manager_->MaybeUpdateAckTimeout(kInstigateAck, ENCRYPTION_FORWARD_SECURE,
                                  QuicPacketNumber(4), packet_receipt_time4,
                                  clock_.ApproximateNow(), &rtt_stats_);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  CheckAckTimeout(clock_.ApproximateNow());
  EXPECT_TRUE(HasPendingAck());
  CheckAckTimeout(clock_.ApproximateNow() -
                  QuicTime::Delta::FromMilliseconds(11) + kDelayedAckTime);
}
}
}
} |
25 | #ifndef AROLLA_DECISION_FOREST_BATCHED_EVALUATION_BATCHED_FOREST_EVALUATOR_H_
#define AROLLA_DECISION_FOREST_BATCHED_EVALUATION_BATCHED_FOREST_EVALUATOR_H_
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/threading.h"
namespace arolla {
// Evaluates a DecisionForest over batched inputs by iterating a frame of
// per-row values through one or more pointwise ForestEvaluators.
class BatchedForestEvaluator {
 public:
  // Knobs controlling how the forest is compiled into evaluators.
  struct CompilationParams {
    static constexpr CompilationParams Default() { return {}; }
    // Target number of splits handled by each pointwise evaluator;
    // presumably used to partition large forests — confirm in Compile().
    int64_t optimal_splits_per_evaluator = 500000;
  };
  // Associates a caller-side input index with the slot the pointwise
  // evaluators read from.
  struct SlotMapping {
    int input_index;
    TypedSlot pointwise_slot;
  };
  // Compiles `decision_forest` (optionally restricted per output group by
  // `groups`) into a batched evaluator.
  static absl::StatusOr<std::unique_ptr<BatchedForestEvaluator>> Compile(
      const DecisionForest& decision_forest,
      absl::Span<const TreeFilter> groups = {{}},
      const CompilationParams& params = CompilationParams::Default());
  // Evaluates the forest over the arrays stored in `input_slots` of `frame`,
  // writing batched results to `output_slots`. `row_count` overrides the row
  // count otherwise inferred from the inputs.
  absl::Status EvalBatch(absl::Span<const TypedSlot> input_slots,
                         absl::Span<const TypedSlot> output_slots,
                         FramePtr frame,
                         RawBufferFactory* = GetHeapBufferFactory(),
                         std::optional<int64_t> row_count = {}) const;
  // Installs a process-wide threading backend used to parallelize batches of
  // at least `min_rows_per_thread` rows. Not thread-safe w.r.t. EvalBatch.
  static void SetThreading(std::unique_ptr<ThreadingInterface> threading,
                           int64_t min_rows_per_thread = 128) {
    *threading_ = std::move(threading);
    min_rows_per_thread_ = min_rows_per_thread;
  }

 private:
  static ::arolla::Indestructible<std::unique_ptr<ThreadingInterface>>
      threading_;
  static int64_t min_rows_per_thread_;
  BatchedForestEvaluator(FrameLayout&& pointwise_layout,
                         std::vector<SlotMapping>&& input_mapping,
                         std::vector<TypedSlot>&& output_pointwise_slots,
                         std::vector<ForestEvaluator>&& pointwise_evaluators)
      : pointwise_layout_(std::move(pointwise_layout)),
        input_mapping_(std::move(input_mapping)),
        output_pointwise_slots_(output_pointwise_slots),
        pointwise_evaluators_(std::move(pointwise_evaluators)) {
    input_pointwise_slots_.reserve(input_mapping_.size());
    input_count_ = 0;
    // input_count_ is one past the largest referenced input index, so
    // callers must pass at least that many input slots.
    for (const auto& m : input_mapping_) {
      input_pointwise_slots_.push_back(m.pointwise_slot);
      input_count_ = std::max(input_count_, m.input_index + 1);
    }
  }
  // Extracts TypedRefs for the mapped inputs from `frame` into
  // `input_arrays`, validating types against the pointwise layout.
  absl::Status GetInputsFromSlots(absl::Span<const TypedSlot> input_slots,
                                  ConstFramePtr frame,
                                  std::vector<TypedRef>* input_arrays) const;
  FrameLayout pointwise_layout_;
  std::vector<SlotMapping> input_mapping_;
  std::vector<TypedSlot> input_pointwise_slots_;
  std::vector<TypedSlot> output_pointwise_slots_;
  int input_count_;
  std::vector<ForestEvaluator> pointwise_evaluators_;
};
}
#endif
#include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/array_like/frame_iter.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/threading.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
// Element-wise sum of two equally-sized, fully-present float arrays.
// Supports DenseArray<float> and Array<float>; any other type pairing is
// reported as an internal error.
absl::StatusOr<TypedValue> AddFullFloatArrays(TypedRef a, TypedRef b) {
  // Sums two equally-sized float spans into a freshly built buffer.
  const auto sum_spans = [](absl::Span<const float> lhs,
                            absl::Span<const float> rhs) {
    Buffer<float>::Builder builder(lhs.size());
    auto out = builder.GetMutableSpan();
    for (int64_t i = 0; i < static_cast<int64_t>(lhs.size()); ++i) {
      out[i] = lhs[i] + rhs[i];
    }
    return std::move(builder).Build();
  };
  if (a.GetType() == GetDenseArrayQType<float>() &&
      b.GetType() == GetDenseArrayQType<float>()) {
    const auto& lhs = a.UnsafeAs<DenseArray<float>>();
    const auto& rhs = b.UnsafeAs<DenseArray<float>>();
    DCHECK_EQ(lhs.size(), rhs.size());
    DCHECK(lhs.IsFull() && rhs.IsFull());
    return TypedValue::FromValue(
        DenseArray<float>{sum_spans(lhs.values.span(), rhs.values.span())});
  }
  if (a.GetType() == GetArrayQType<float>() &&
      b.GetType() == GetArrayQType<float>()) {
    const auto& lhs = a.UnsafeAs<Array<float>>();
    const auto& rhs = b.UnsafeAs<Array<float>>();
    DCHECK_EQ(lhs.size(), rhs.size());
    DCHECK(lhs.IsFullForm() && rhs.IsFullForm());
    return TypedValue::FromValue(Array<float>{sum_spans(
        lhs.dense_data().values.span(), rhs.dense_data().values.span())});
  }
  return absl::InternalError("Invalid type in BatchedForestEvaluator/Add");
}
// Splits `decision_forest` into several smaller forests and compiles a
// ForestEvaluator for each, targeting roughly
// params.optimal_splits_per_evaluator split nodes per evaluator. Produces a
// single evaluator when the forest is small enough.
absl::StatusOr<std::vector<ForestEvaluator>> CreatePointwiseEvaluators(
    const BatchedForestEvaluator::CompilationParams& params,
    const DecisionForest& decision_forest, const std::vector<TypedSlot>& inputs,
    const std::vector<ForestEvaluator::Output>& outputs) {
  // Total number of split nodes across all trees.
  int64_t split_count = 0;
  for (const auto& tree : decision_forest.GetTrees()) {
    split_count += tree.split_nodes.size();
  }
  // Number of evaluators, rounded up; always at least one.
  int64_t evaluator_count = std::max<int64_t>(
      1, (split_count + params.optimal_splits_per_evaluator - 1) /
             params.optimal_splits_per_evaluator);
  std::vector<ForestEvaluator> evaluators;
  evaluators.reserve(evaluator_count);
  if (evaluator_count == 1) {
    // Fast path: compile the whole forest as a single evaluator.
    ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(decision_forest,
                                                              inputs, outputs));
    evaluators.push_back(std::move(evaluator));
    return evaluators;
  }
  // Greedily pack consecutive trees until the per-evaluator split budget is
  // reached, then compile that group as a partial forest.
  int64_t splits_per_evaluator =
      (split_count + evaluator_count - 1) / evaluator_count;
  int64_t estimated_trees_per_evaluator =
      (decision_forest.GetTrees().size() + evaluator_count - 1) /
      evaluator_count;
  std::vector<DecisionTree> trees;
  trees.reserve(estimated_trees_per_evaluator);
  int64_t current_split_count = 0;
  for (const auto& tree : decision_forest.GetTrees()) {
    trees.push_back(tree);
    current_split_count += tree.split_nodes.size();
    if (current_split_count >= splits_per_evaluator) {
      ASSIGN_OR_RETURN(auto partial_forest,
                       DecisionForest::FromTrees(std::move(trees)));
      ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(
                                           *partial_forest, inputs, outputs));
      evaluators.push_back(std::move(evaluator));
      trees.clear();
      trees.reserve(estimated_trees_per_evaluator);
      current_split_count = 0;
    }
  }
  // Compile whatever trees remain after the last full group.
  if (!trees.empty()) {
    ASSIGN_OR_RETURN(auto partial_forest,
                     DecisionForest::FromTrees(std::move(trees)));
    ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(*partial_forest,
                                                              inputs, outputs));
    evaluators.push_back(std::move(evaluator));
  }
  return evaluators;
}
}
// Storage for the process-wide configuration set via SetThreading().
// min_rows_per_thread_ is zero-initialized until SetThreading is called.
Indestructible<std::unique_ptr<ThreadingInterface>>
    BatchedForestEvaluator::threading_;
int64_t BatchedForestEvaluator::min_rows_per_thread_;
absl::StatusOr<std::unique_ptr<BatchedForestEvaluator>>
BatchedForestEvaluator::Compile(const DecisionForest& decision_forest,
                                absl::Span<const TreeFilter> groups,
                                const CompilationParams& params) {
  // Build the pointwise frame layout: one slot per input the forest reads,
  // plus one float output slot per tree-filter group.
  FrameLayout::Builder bldr;
  std::vector<SlotMapping> input_slots_mapping;
  TypedSlot placeholder =
      TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
  std::vector<TypedSlot> input_pointwise_slots;
  for (const auto& kv : decision_forest.GetRequiredQTypes()) {
    TypedSlot pointwise_slot = AddSlot(kv.second, &bldr);
    // Input indices may be sparse; pad the dense slot vector with
    // placeholder slots for indices the forest never reads.
    while (input_pointwise_slots.size() <= kv.first) {
      input_pointwise_slots.push_back(placeholder);
    }
    input_pointwise_slots[kv.first] = pointwise_slot;
    input_slots_mapping.push_back({kv.first, pointwise_slot});
  }
  std::vector<ForestEvaluator::Output> pointwise_outputs;
  std::vector<TypedSlot> output_pointwise_slots;
  pointwise_outputs.reserve(groups.size());
  output_pointwise_slots.reserve(groups.size());
  for (const TreeFilter& filter : groups) {
    auto slot = bldr.AddSlot<float>();
    pointwise_outputs.push_back({filter, slot});
    output_pointwise_slots.push_back(TypedSlot::FromSlot(slot));
  }
  auto pointwise_layout = std::move(bldr).Build();
  // The forest may be split into several partial evaluators; see
  // CreatePointwiseEvaluators above.
  ASSIGN_OR_RETURN(
      std::vector<ForestEvaluator> pointwise_evaluators,
      CreatePointwiseEvaluators(params, decision_forest, input_pointwise_slots,
                                pointwise_outputs));
  return absl::WrapUnique(new BatchedForestEvaluator(
      std::move(pointwise_layout), std::move(input_slots_mapping),
      std::move(output_pointwise_slots), std::move(pointwise_evaluators)));
}
// Binds each input slot required by the forest to a TypedRef on `frame`,
// appending the refs to `input_arrays` in mapping order. Returns
// InvalidArgumentError when fewer than input_count_ slots are supplied.
absl::Status BatchedForestEvaluator::GetInputsFromSlots(
    absl::Span<const TypedSlot> input_slots, ConstFramePtr frame,
    std::vector<TypedRef>* input_arrays) const {
  // Cast avoids an implicit signed/unsigned comparison between the span
  // size and the int input_count_.
  if (static_cast<int64_t>(input_slots.size()) < input_count_) {
    return absl::InvalidArgumentError(
        absl::StrFormat("not enough inputs: at least %d expected, %d found",
                        input_count_, input_slots.size()));
  }
  // Iterate by const reference to avoid copying each SlotMapping entry.
  for (const auto& m : input_mapping_) {
    input_arrays->push_back(
        TypedRef::FromSlot(input_slots[m.input_index], frame));
  }
  return absl::OkStatus();
}
absl::Status BatchedForestEvaluator::EvalBatch(
    absl::Span<const TypedSlot> input_slots,
    absl::Span<const TypedSlot> output_slots, FramePtr frame,
    RawBufferFactory* buffer_factory, std::optional<int64_t> row_count) const {
  // Collect TypedRefs for the inputs the decision forest actually reads.
  std::vector<TypedRef> input_arrays;
  input_arrays.reserve(input_mapping_.size());
  RETURN_IF_ERROR(GetInputsFromSlots(input_slots, frame, &input_arrays));
  // If the caller did not specify row_count, infer it from the first used
  // input array, falling back to the first provided slot.
  if (!row_count.has_value()) {
    if (!input_arrays.empty()) {
      ASSIGN_OR_RETURN(row_count, GetArraySize(input_arrays[0]));
    } else if (!input_slots.empty()) {
      ASSIGN_OR_RETURN(row_count,
                       GetArraySize(TypedRef::FromSlot(input_slots[0], frame)));
    }
  }
  // NOTE(review): thread_count is never raised above 1 in this function, so
  // the multi-threaded ForEachFrame branch below is dead code and the
  // threading_ / min_rows_per_thread_ statics configured via SetThreading
  // are unused here — confirm whether row-count-based thread selection was
  // dropped intentionally.
  int thread_count = 1;
  // Runs one pointwise evaluator over the whole batch and stores its
  // outputs into `frame` via the frame iterator.
  auto run_evaluator = [&](const ForestEvaluator& eval) -> absl::Status {
    ASSIGN_OR_RETURN(
        auto frame_iterator,
        FrameIterator::Create(
            input_arrays, {input_pointwise_slots_.data(), input_arrays.size()},
            output_slots, output_pointwise_slots_, &pointwise_layout_,
            FrameIterator::Options{.row_count = row_count,
                                   .frame_buffer_count = 64 * thread_count,
                                   .buffer_factory = buffer_factory}));
    if (thread_count > 1) {
      frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); },
                                  **threading_, thread_count);
    } else {
      frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); });
    }
    return frame_iterator.StoreOutput(frame);
  };
  if (pointwise_evaluators_.size() == 1) {
    return run_evaluator(pointwise_evaluators_.front());
  } else {
    // Multiple partial evaluators: each evaluates a subset of the forest's
    // trees, so the final result is the element-wise sum of their outputs.
    std::vector<TypedValue> res_sum;
    res_sum.reserve(output_slots.size());
    RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.front()));
    for (const auto& s : output_slots) {
      res_sum.push_back(TypedValue::FromSlot(s, frame));
    }
    for (int eval_id = 1; eval_id < pointwise_evaluators_.size() - 1;
         ++eval_id) {
      RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_[eval_id]));
      for (int i = 0; i < output_slots.size(); ++i) {
        ASSIGN_OR_RETURN(
            res_sum[i],
            AddFullFloatArrays(res_sum[i].AsRef(),
                               TypedRef::FromSlot(output_slots[i], frame)));
      }
    }
    // The last evaluator writes into the output slots directly; add the
    // accumulated partial sums on top and copy the totals back out.
    RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.back()));
    for (int i = 0; i < output_slots.size(); ++i) {
      ASSIGN_OR_RETURN(
          TypedValue full_sum,
          AddFullFloatArrays(res_sum[i].AsRef(),
                             TypedRef::FromSlot(output_slots[i], frame)));
      RETURN_IF_ERROR(full_sum.CopyToSlot(output_slots[i], frame));
    }
    return absl::OkStatus();
  }
}
} | #include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/statusor.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/decision_forest/testing/test_util.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
// Builds a two-tree test forest: tree 0 (submodel 0) splits on a float
// interval at input 0 and a set-of-values condition at input 1; tree 1
// (submodel 1) is a single interval split on input 0.
absl::StatusOr<DecisionForestPtr> CreateTestForest() {
  constexpr float kInf = std::numeric_limits<float>::infinity();
  constexpr auto S = DecisionTreeNodeId::SplitNodeId;
  constexpr auto A = DecisionTreeNodeId::AdjustmentId;
  std::vector<DecisionTree> trees(2);
  trees[0].tag = {.submodel_id = 0};
  trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
  trees[0].split_nodes = {
      {S(1), S(2), IntervalSplit(0, 1.5, kInf)},
      {A(0), A(2), SetOfValuesSplit<int64_t>(1, {1, 2}, false)},
      {A(1), A(3), IntervalSplit(0, -kInf, 10)}};
  trees[1].tag = {.submodel_id = 1};
  trees[1].adjustments = {-1.0, 1.0};
  trees[1].split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 5)}};
  return DecisionForest::FromTrees(std::move(trees));
}
TEST(BatchedForestEvaluator, EvalBatch) {
ASSERT_OK_AND_ASSIGN(auto forest, CreateTestForest());
std::vector<TreeFilter> groups{{.submodels = {0}}, {.submodels = {1}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out1_slot = bldr.AddSlot<DenseArray<float>>();
auto out2_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
{
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2),
1);
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
}
TEST(BatchedForestEvaluator, UnusedInputs) {
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
DecisionTree tree;
tree.adjustments = {-1, 1};
tree.split_nodes = {{A(0), A(1), IntervalSplit(2, 0, 1)}};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
ASSERT_OK_AND_ASSIGN(auto eval, BatchedForestEvaluator::Compile(*forest));
FrameLayout::Builder bldr;
auto unused1_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto unused2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto in_slot = bldr.AddSlot<DenseArray<float>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in_slot, CreateDenseArray<float>({-1, 0.5, 2}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(unused1_slot), TypedSlot::FromSlot(unused2_slot),
TypedSlot::FromSlot(in_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot), ::testing::ElementsAre(-1, 1, -1));
}
TEST(BatchedForestEvaluator, AllInputUnused) {
std::vector<DecisionTree> trees(1);
trees[0].adjustments = {1.5};
ASSERT_OK_AND_ASSIGN(DecisionForestPtr forest,
DecisionForest::FromTrees(std::move(trees)));
std::vector<TreeFilter> groups{{.submodels = {0}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot),
::testing::ElementsAre(1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5));
}
TEST(BatchedForestEvaluator, SplitCountPerEvaluator) {
constexpr int64_t min_num_splits = 10;
constexpr int64_t max_num_splits = 30;
constexpr int64_t num_trees = 100;
constexpr int64_t batch_size = 10;
absl::BitGen rnd;
constexpr int64_t min_total_split_count = num_trees * min_num_splits;
int64_t split_count_per_evaluator = absl::Uniform<int64_t>(
rnd, min_total_split_count / 5, min_total_split_count * 4 / 5);
auto forest =
CreateRandomFloatForest(&rnd, 10, true,
min_num_splits, max_num_splits, num_trees);
ASSERT_OK_AND_ASSIGN(auto evaluator,
BatchedForestEvaluator::Compile(*forest));
ASSERT_OK_AND_ASSIGN(
auto subdivided_evaluator,
BatchedForestEvaluator::Compile(*forest, {TreeFilter()},
{split_count_per_evaluator}));
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
ASSERT_OK(CreateArraySlotsForForest(*forest, &layout_builder, &slots));
auto dense_array_output_slot = layout_builder.AddSlot<DenseArray<float>>();
auto array_output_slot = layout_builder.AddSlot<Array<float>>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation ctx(&layout);
FramePtr frame = ctx.frame();
for (auto slot : slots) {
ASSERT_OK(FillArrayWithRandomValues(batch_size, slot, frame, &rnd));
}
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(dense_array_output_slot)},
frame, nullptr, batch_size));
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(array_output_slot)},
frame, nullptr, batch_size));
DenseArray<float> dense_array1 = frame.Get(dense_array_output_slot);
Array<float> array1 = frame.Get(array_output_slot);
frame.Set(dense_array_output_slot, DenseArray<float>());
frame.Set(array_output_slot, Array<float>());
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(dense_array_output_slot)}, frame, nullptr,
batch_size));
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(array_output_slot)}, frame, nullptr,
batch_size));
DenseArray<float> dense_array2 = frame.Get(dense_array_output_slot);
Array<float> array2 = frame.Get(array_output_slot);
ASSERT_EQ(dense_array1.size(), batch_size);
ASSERT_EQ(array1.size(), batch_size);
ASSERT_EQ(dense_array2.size(), batch_size);
ASSERT_EQ(array2.size(), batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
bool present = array1[i].present;
EXPECT_EQ(array2[i].present, present);
EXPECT_EQ(dense_array1[i].present, present);
EXPECT_EQ(dense_array2[i].present, present);
if (present) {
float value = array1[i].value;
EXPECT_FLOAT_EQ(array2[i].value, value);
EXPECT_FLOAT_EQ(dense_array1[i].value, value);
EXPECT_FLOAT_EQ(dense_array2[i].value, value);
}
}
}
}
} |
26 | #ifndef ABSL_STRINGS_CORD_BUFFER_H_
#define ABSL_STRINGS_CORD_BUFFER_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/numeric/bits.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class Cord;
class CordBufferTestPeer;
// A mutable character buffer that can be handed to a Cord without copying.
// Small buffers (up to Rep::kInlineCapacity bytes) are stored inline;
// larger buffers own a heap-allocated cord_internal::CordRepFlat.
class CordBuffer {
 public:
  static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength;
  static constexpr size_t kCustomLimit = 64U << 10;
  CordBuffer() = default;
  ~CordBuffer();
  // Move-only: moving transfers the rep and leaves the source empty.
  CordBuffer(CordBuffer&& rhs) noexcept;
  CordBuffer& operator=(CordBuffer&&) noexcept;
  CordBuffer(const CordBuffer&) = delete;
  CordBuffer& operator=(const CordBuffer&) = delete;
  static constexpr size_t MaximumPayload();
  static constexpr size_t MaximumPayload(size_t block_size);
  static CordBuffer CreateWithDefaultLimit(size_t capacity);
  static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity);
  absl::Span<char> available();
  absl::Span<char> available_up_to(size_t size);
  char* data();
  const char* data() const;
  size_t length() const;
  size_t capacity() const;
  void IncreaseLengthBy(size_t n);
  void SetLength(size_t length);
 private:
  static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, "");
  static constexpr size_t kMaxPageSlop = 128;
  static constexpr size_t kOverhead = cord_internal::kFlatOverhead;
  using CordRepFlat = cord_internal::CordRepFlat;
  // Discriminated union of the inline ("short") and heap ("long") forms.
  // The byte at offsetof(Short, raw_size) acts as the tag: odd means short
  // (raw_size stores (length << 1) | 1, see set_short_length); in the long
  // form that byte aliases pointer/padding bytes and is relied upon to be
  // even for an aligned CordRepFlat*.
  struct Rep {
    static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1;
    // Default: empty short rep (Short{} sets raw_size = 1, i.e. length 0).
    Rep() : short_rep{} {}
    explicit Rep(cord_internal::CordRepFlat* rep) : long_rep{rep} {
      assert(rep != nullptr);
    }
    // True when this holds the inline representation (tag bit set).
    bool is_short() const {
      constexpr size_t offset = offsetof(Short, raw_size);
      return (reinterpret_cast<const char*>(this)[offset] & 1) != 0;
    }
    // Unused tail of the inline buffer.
    absl::Span<char> short_available() {
      const size_t length = short_length();
      return absl::Span<char>(short_rep.data + length,
                              kInlineCapacity - length);
    }
    // Unused tail of the heap-allocated flat rep.
    absl::Span<char> long_available() const {
      assert(!is_short());
      const size_t length = long_rep.rep->length;
      return absl::Span<char>(long_rep.rep->Data() + length,
                              long_rep.rep->Capacity() - length);
    }
    // Length of the inline data (drops the tag bit).
    size_t short_length() const {
      assert(is_short());
      return static_cast<size_t>(short_rep.raw_size >> 1);
    }
    // Stores `length` shifted left with the tag bit set, marking the rep
    // short. Also used to "release" a long rep without deleting it.
    void set_short_length(size_t length) {
      short_rep.raw_size = static_cast<char>((length << 1) + 1);
    }
    // Adds `n` to the inline length; the tag bit is preserved because the
    // increment is pre-shifted.
    void add_short_length(size_t n) {
      assert(is_short());
      short_rep.raw_size += static_cast<char>(n << 1);
    }
    char* data() {
      assert(is_short());
      return short_rep.data;
    }
    const char* data() const {
      assert(is_short());
      return short_rep.data;
    }
    cord_internal::CordRepFlat* rep() const {
      assert(!is_short());
      return long_rep.rep;
    }
    // Layouts chosen so that raw_size lands on a byte that is even in the
    // long form on either endianness.
#if defined(ABSL_IS_BIG_ENDIAN)
    struct Long {
      explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
      void* padding;
      cord_internal::CordRepFlat* rep;
    };
    struct Short {
      char data[sizeof(Long) - 1];
      char raw_size = 1;
    };
#else
    struct Long {
      explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
      cord_internal::CordRepFlat* rep;
      void* padding;
    };
    struct Short {
      char raw_size = 1;
      char data[sizeof(Long) - 1];
    };
#endif
    union {
      Long long_rep;
      Short short_rep;
    };
  };
  static bool IsPow2(size_t size) { return absl::has_single_bit(size); }
  static size_t Log2Floor(size_t size) {
    return static_cast<size_t>(absl::bit_width(size) - 1);
  }
  static size_t Log2Ceil(size_t size) {
    return static_cast<size_t>(absl::bit_width(size - 1));
  }
  template <typename... AllocationHints>
  static CordBuffer CreateWithCustomLimitImpl(size_t block_size,
                                              size_t capacity,
                                              AllocationHints... hints);
  // Transfers the contents out of this buffer: returns the heap rep (caller
  // takes ownership) or nullptr for the inline form, in which case
  // `short_value` is pointed at the inline bytes. Leaves the buffer empty.
  cord_internal::CordRep* ConsumeValue(absl::string_view& short_value) {
    cord_internal::CordRep* rep = nullptr;
    if (rep_.is_short()) {
      short_value = absl::string_view(rep_.data(), rep_.short_length());
    } else {
      rep = rep_.rep();
    }
    rep_.set_short_length(0);
    return rep;
  }
  // Adopts an already-allocated flat rep (used by the Create* factories).
  explicit CordBuffer(cord_internal::CordRepFlat* rep) : rep_(rep) {
    assert(rep != nullptr);
  }
  Rep rep_;
  friend class Cord;
  friend class CordBufferTestPeer;
};
// Largest payload a buffer created with CreateWithDefaultLimit can hold.
inline constexpr size_t CordBuffer::MaximumPayload() {
  return cord_internal::kMaxFlatLength;
}
// Largest payload for a given block size, capped at kCustomLimit and net of
// the flat-rep overhead.
inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
  return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead;
}
// Creates a buffer for at least `capacity` bytes. Requests that fit in the
// inline representation return a default (short) buffer; larger requests
// allocate a flat rep on the heap.
inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
  if (capacity <= Rep::kInlineCapacity) {
    return CordBuffer();
  }
  cord_internal::CordRepFlat* const flat =
      cord_internal::CordRepFlat::New(capacity);
  flat->length = 0;
  return CordBuffer(flat);
}
// Shared implementation of CreateWithCustomLimit: chooses a total
// allocation size for the request and allocates a large flat rep.
template <typename... AllocationHints>
inline CordBuffer CordBuffer::CreateWithCustomLimitImpl(
    size_t block_size, size_t capacity, AllocationHints... hints) {
  assert(IsPow2(block_size));
  // Clamp both the request and the block size to the custom limit (64 KiB).
  capacity = (std::min)(capacity, kCustomLimit);
  block_size = (std::min)(block_size, kCustomLimit);
  if (capacity + kOverhead >= block_size) {
    // Request (plus flat overhead) fills a whole block: allocate one block.
    capacity = block_size;
  } else if (capacity <= kDefaultLimit) {
    // Small request: allocate exactly the requested bytes plus overhead.
    capacity = capacity + kOverhead;
  } else if (!IsPow2(capacity)) {
    // Round to a power of 2: round up when the slop at least covers the
    // overhead without wasting more than kMaxPageSlop, otherwise down.
    const size_t rounded_up = size_t{1} << Log2Ceil(capacity);
    const size_t slop = rounded_up - capacity;
    if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) {
      capacity = rounded_up;
    } else {
      const size_t rounded_down = size_t{1} << Log2Floor(capacity);
      capacity = rounded_down;
    }
  }
  // `capacity` is now the total allocation size; the usable length excludes
  // the flat-rep overhead.
  const size_t length = capacity - kOverhead;
  auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...);
  rep->length = 0;
  return CordBuffer(rep);
}
// Public entry point: forwards to the hint-taking implementation with no
// allocation hints.
inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size,
                                                    size_t capacity) {
  return CreateWithCustomLimitImpl(block_size, capacity);
}
// Releases the heap-allocated flat rep, if any; the inline (short) form
// owns no heap memory.
inline CordBuffer::~CordBuffer() {
  if (!rep_.is_short()) {
    cord_internal::CordRepFlat::Delete(rep_.rep());
  }
}
// Move construction copies the rep bits, then resets `rhs` to an empty
// short buffer so ownership of any heap rep transfers exactly once.
inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : rep_(rhs.rep_) {
  rhs.rep_.set_short_length(0);
}
// Move assignment: releases any currently owned heap rep, then steals
// rhs's rep and leaves rhs empty (short).
inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept {
  if (!rep_.is_short()) cord_internal::CordRepFlat::Delete(rep_.rep());
  rep_ = rhs.rep_;
  rhs.rep_.set_short_length(0);
  return *this;
}
inline absl::Span<char> CordBuffer::available() {
return rep_.is_short() ? rep_.short_available() : rep_.long_available();
}
inline absl::Span<char> CordBuffer::available_up_to(size_t size) {
return available().subspan(0, size);
}
inline char* CordBuffer::data() {
return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
}
inline const char* CordBuffer::data() const {
return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
}
inline size_t CordBuffer::capacity() const {
return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity();
}
inline size_t CordBuffer::length() const {
return rep_.is_short() ? rep_.short_length() : rep_.rep()->length;
}
// Sets the length of the data held in this buffer; `length` must not
// exceed capacity() (hardening-checked).
inline void CordBuffer::SetLength(size_t length) {
  ABSL_HARDENING_ASSERT(length <= capacity());
  if (rep_.is_short()) {
    rep_.set_short_length(length);
  } else {
    rep_.rep()->length = length;
  }
}
// Grows the length by `n`; the result must not exceed capacity(). The
// double check guards against overflow of length() + n.
inline void CordBuffer::IncreaseLengthBy(size_t n) {
  ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity());
  if (rep_.is_short()) {
    rep_.add_short_length(n);
  } else {
    rep_.rep()->length += n;
  }
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/strings/cord_buffer.h"
#include <cstddef>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t CordBuffer::kDefaultLimit;
constexpr size_t CordBuffer::kCustomLimit;
#endif
ABSL_NAMESPACE_END
} | #include "absl/strings/cord_buffer.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
using testing::Eq;
using testing::Ge;
using testing::Le;
using testing::Ne;
namespace absl {
ABSL_NAMESPACE_BEGIN
class CordBufferTestPeer {
public:
static cord_internal::CordRep* ConsumeValue(CordBuffer& buffer,
absl::string_view& short_value) {
return buffer.ConsumeValue(short_value);
}
};
namespace {
using ::absl::cordrep_testing::CordToString;
constexpr size_t kInlinedSize = sizeof(CordBuffer) - 1;
constexpr size_t kDefaultLimit = CordBuffer::kDefaultLimit;
constexpr size_t kCustomLimit = CordBuffer::kCustomLimit;
constexpr size_t kMaxFlatSize = cord_internal::kMaxFlatSize;
constexpr size_t kMaxFlatLength = cord_internal::kMaxFlatLength;
constexpr size_t kFlatOverhead = cord_internal::kFlatOverhead;
constexpr size_t k8KiB = 8 << 10;
constexpr size_t k16KiB = 16 << 10;
constexpr size_t k64KiB = 64 << 10;
constexpr size_t k1MB = 1 << 20;
class CordBufferTest : public testing::TestWithParam<size_t> {};
INSTANTIATE_TEST_SUITE_P(MediumSize, CordBufferTest,
testing::Values(1, kInlinedSize - 1, kInlinedSize,
kInlinedSize + 1, kDefaultLimit - 1,
kDefaultLimit));
TEST_P(CordBufferTest, MaximumPayload) {
EXPECT_THAT(CordBuffer::MaximumPayload(), Eq(kMaxFlatLength));
EXPECT_THAT(CordBuffer::MaximumPayload(512), Eq(512 - kFlatOverhead));
EXPECT_THAT(CordBuffer::MaximumPayload(k64KiB), Eq(k64KiB - kFlatOverhead));
EXPECT_THAT(CordBuffer::MaximumPayload(k1MB), Eq(k64KiB - kFlatOverhead));
}
TEST(CordBufferTest, ConstructDefault) {
CordBuffer buffer;
EXPECT_THAT(buffer.capacity(), Eq(sizeof(CordBuffer) - 1));
EXPECT_THAT(buffer.length(), Eq(0));
EXPECT_THAT(buffer.data(), Ne(nullptr));
EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
memset(buffer.data(), 0xCD, buffer.capacity());
}
TEST(CordBufferTest, CreateSsoWithDefaultLimit) {
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(3);
EXPECT_THAT(buffer.capacity(), Ge(3));
EXPECT_THAT(buffer.capacity(), Le(sizeof(CordBuffer)));
EXPECT_THAT(buffer.length(), Eq(0));
memset(buffer.data(), 0xCD, buffer.capacity());
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
EXPECT_THAT(buffer.length(), Eq(3));
absl::string_view short_value;
EXPECT_THAT(CordBufferTestPeer::ConsumeValue(buffer, short_value),
Eq(nullptr));
EXPECT_THAT(absl::string_view(buffer.data(), 3), Eq("Abc"));
EXPECT_THAT(short_value, Eq("Abc"));
}
TEST_P(CordBufferTest, Available) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
buffer.SetLength(2);
EXPECT_THAT(buffer.available().data(), Eq(buffer.data() + 2));
EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity() - 2));
}
TEST_P(CordBufferTest, IncreaseLengthBy) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
buffer.IncreaseLengthBy(2);
EXPECT_THAT(buffer.length(), Eq(2));
buffer.IncreaseLengthBy(5);
EXPECT_THAT(buffer.length(), Eq(7));
}
TEST_P(CordBufferTest, AvailableUpTo) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
size_t expected_up_to = std::min<size_t>(3, buffer.capacity());
EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data()));
EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
buffer.SetLength(2);
expected_up_to = std::min<size_t>(3, buffer.capacity() - 2);
EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data() + 2));
EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
}
// Upper bound on the capacity a CordBuffer created with `block_size` may
// report.
// NOTE(review): `requested` is clamped but otherwise unused — the bound is
// block_size - kFlatOverhead regardless; confirm whether the clamp was
// intended to feed into the result.
size_t MaxCapacityFor(size_t block_size, size_t requested) {
  requested = (std::min)(requested, cord_internal::kMaxLargeFlatSize);
  return block_size - kFlatOverhead;
}
TEST_P(CordBufferTest, CreateWithDefaultLimit) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
EXPECT_THAT(buffer.capacity(), Ge(requested));
EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
EXPECT_THAT(buffer.length(), Eq(0));
memset(buffer.data(), 0xCD, buffer.capacity());
std::string data(requested - 1, 'x');
memcpy(buffer.data(), data.c_str(), requested);
buffer.SetLength(requested);
EXPECT_THAT(buffer.length(), Eq(requested));
EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
}
TEST(CordBufferTest, CreateWithDefaultLimitAskingFor2GB) {
constexpr size_t k2GiB = 1U << 31;
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(k2GiB);
EXPECT_THAT(buffer.capacity(), Le(2 * CordBuffer::kDefaultLimit));
EXPECT_THAT(buffer.length(), Eq(0));
EXPECT_THAT(buffer.data(), Ne(nullptr));
memset(buffer.data(), 0xCD, buffer.capacity());
}
TEST_P(CordBufferTest, MoveConstruct) {
const size_t requested = GetParam();
CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
const size_t capacity = from.capacity();
memcpy(from.data(), "Abc", 4);
from.SetLength(4);
CordBuffer to(std::move(from));
EXPECT_THAT(to.capacity(), Eq(capacity));
EXPECT_THAT(to.length(), Eq(4));
EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
EXPECT_THAT(from.length(), Eq(0));
}
TEST_P(CordBufferTest, MoveAssign) {
const size_t requested = GetParam();
CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
const size_t capacity = from.capacity();
memcpy(from.data(), "Abc", 4);
from.SetLength(4);
CordBuffer to;
to = std::move(from);
EXPECT_THAT(to.capacity(), Eq(capacity));
EXPECT_THAT(to.length(), Eq(4));
EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
EXPECT_THAT(from.length(), Eq(0));
}
TEST_P(CordBufferTest, ConsumeValue) {
const size_t requested = GetParam();
CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
memcpy(buffer.data(), "Abc", 4);
buffer.SetLength(3);
absl::string_view short_value;
if (cord_internal::CordRep* rep =
CordBufferTestPeer::ConsumeValue(buffer, short_value)) {
EXPECT_THAT(CordToString(rep), Eq("Abc"));
cord_internal::CordRep::Unref(rep);
} else {
EXPECT_THAT(short_value, Eq("Abc"));
}
EXPECT_THAT(buffer.length(), Eq(0));
}
// For requests within the default limit, CreateWithCustomLimit must behave
// like CreateWithDefaultLimit: capacity covers the request and stays within
// the documented maximum for (limit, request).
TEST_P(CordBufferTest, CreateWithCustomLimitWithinDefaultLimit) {
  const size_t requested = GetParam();
  CordBuffer buffer =
      CordBuffer::CreateWithCustomLimit(kMaxFlatSize, requested);
  EXPECT_THAT(buffer.capacity(), Ge(requested));
  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
  EXPECT_THAT(buffer.length(), Eq(0));
  // Touch the full capacity so sanitizers verify it is really addressable.
  memset(buffer.data(), 0xCD, buffer.capacity());
  // Copy `requested` bytes: `requested - 1` payload chars plus the string's
  // NUL terminator, which is legal to read from c_str().
  std::string data(requested - 1, 'x');
  memcpy(buffer.data(), data.c_str(), requested);
  buffer.SetLength(requested);
  EXPECT_THAT(buffer.length(), Eq(requested));
  EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
}
// A custom limit at or above the default limit must still honor requests
// that fit under the default limit.
TEST(CordLargeBufferTest, CreateAtOrBelowDefaultLimit) {
  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, kDefaultLimit);
  EXPECT_THAT(buffer.capacity(), Ge(kDefaultLimit));
  EXPECT_THAT(buffer.capacity(),
              Le(MaxCapacityFor(kMaxFlatSize, kDefaultLimit)));
  // A small, odd-sized request is also satisfied.
  buffer = CordBuffer::CreateWithCustomLimit(k64KiB, 3178);
  EXPECT_THAT(buffer.capacity(), Ge(3178));
}
// For every power-of-two size up to the custom limit, the created buffer's
// capacity covers `size - overhead` without exceeding the documented maximum.
TEST(CordLargeBufferTest, CreateWithCustomLimit) {
  // The doubling loop below only covers all sizes if kMaxFlatSize is a power
  // of two. That is a compile-time property of a constant, so check it
  // statically: the original `ASSERT_THAT(cond, "Must be power of 2")` passed
  // a string literal where a matcher is expected.
  static_assert((kMaxFlatSize & (kMaxFlatSize - 1)) == 0,
                "Must be power of 2");
  for (size_t size = kMaxFlatSize; size <= kCustomLimit; size *= 2) {
    CordBuffer buffer = CordBuffer::CreateWithCustomLimit(size, size);
    // `size` includes the flat-rep overhead, so usable capacity is smaller.
    size_t expected = size - kFlatOverhead;
    ASSERT_THAT(buffer.capacity(), Ge(expected));
    EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(size, expected)));
  }
}
// A request (1 MiB) far beyond the custom limit (64 KiB) is clamped to the
// limit instead of being allocated in full.
TEST(CordLargeBufferTest, CreateWithTooLargeLimit) {
  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, k1MB);
  const size_t min_expected = k64KiB - kFlatOverhead;
  ASSERT_THAT(buffer.capacity(), Ge(min_expected));
  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k64KiB, k1MB)));
}
// Overflow hardening: requests within 32 bytes of SIZE_MAX must not wrap
// around in the internal capacity arithmetic; the result is still a sane,
// limit-bounded buffer.
TEST(CordLargeBufferTest, CreateWithHugeValueForOverFlowHardening) {
  for (size_t dist_from_max = 0; dist_from_max <= 32; ++dist_from_max) {
    size_t capacity = std::numeric_limits<size_t>::max() - dist_from_max;
    CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(capacity);
    ASSERT_THAT(buffer.capacity(), Ge(kDefaultLimit));
    EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, capacity)));
    // Repeat for every power-of-two custom limit; the inner `buffer`
    // intentionally shadows the outer one.
    for (size_t limit = kMaxFlatSize; limit <= kCustomLimit; limit *= 2) {
      CordBuffer buffer = CordBuffer::CreateWithCustomLimit(limit, capacity);
      ASSERT_THAT(buffer.capacity(), Ge(limit - kFlatOverhead));
      EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(limit, capacity)));
    }
  }
}
// With a small (512-byte) custom limit, any request — above, at, or below
// the limit — yields a capacity of at least 512 - overhead, bounded by the
// documented maximum for that request.
TEST(CordLargeBufferTest, CreateWithSmallLimit) {
  for (const size_t requested :
       {size_t{1024}, size_t{512}, size_t{511}, size_t{498}}) {
    CordBuffer buffer = CordBuffer::CreateWithCustomLimit(512, requested);
    ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
    EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, requested)));
  }
}
// Requesting 15 KiB under a 16 KiB limit would waste most of a 16 KiB
// allocation's rounding slop, so the implementation is expected to fall back
// to the next-smaller (8 KiB) size class — TODO confirm against
// CreateWithCustomLimit's documented rounding policy.
TEST(CordLargeBufferTest, CreateWasteFull) {
  const size_t requested = (15 << 10);
  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
  ASSERT_THAT(buffer.capacity(), Ge(k8KiB - kFlatOverhead));
  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k8KiB, requested)));
}
// A request just below the limit (within twice the flat overhead) leaves too
// little slop to bother downsizing: the full 16 KiB class is used.
TEST(CordLargeBufferTest, CreateSmallSlop) {
  const size_t requested = k16KiB - 2 * kFlatOverhead;
  CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
  ASSERT_THAT(buffer.capacity(), Ge(k16KiB - kFlatOverhead));
  EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k16KiB, requested)));
}
}
ABSL_NAMESPACE_END
} |
27 | #ifndef TENSORFLOW_CORE_KERNELS_DATA_REPEAT_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_REPEAT_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
// Op kernel implementing `tf.data.Dataset.repeat(count)`: repeats the input
// dataset `count` times; a negative count repeats forever, zero yields an
// empty dataset.
class RepeatDatasetOp : public UnaryDatasetOpKernel {
 public:
  // Names of the op's type, inputs, and attrs as they appear in the graph.
  static constexpr const char* const kDatasetType = "Repeat";
  static constexpr const char* const kInputDataset = "input_dataset";
  static constexpr const char* const kCount = "count";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  explicit RepeatDatasetOp(OpKernelConstruction* ctx);
 protected:
  // Builds the Dataset from `input` and the scalar `count` input tensor.
  void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                   DatasetBase** output) override;
 private:
  class Dataset;
};
}
}
#endif
#include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const RepeatDatasetOp::kDatasetType;
constexpr const char* const RepeatDatasetOp::kInputDataset;
constexpr const char* const RepeatDatasetOp::kCount;
constexpr const char* const RepeatDatasetOp::kOutputTypes;
constexpr const char* const RepeatDatasetOp::kOutputShapes;
namespace {
constexpr char kForeverRepeat[] = "ForeverRepeat";
constexpr char kEmptyRepeat[] = "EmptyRepeat";
constexpr char kFiniteRepeat[] = "FiniteRepeat";
constexpr char kCurIteration[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kUninitialized[] = "uninitialized";
constexpr int64_t kKnownRatio = 1;
// Returns the checkpoint/iterator prefix for one repetition epoch,
// e.g. nested_prefix("it", 3) == "it[3]".
std::string nested_prefix(const std::string& prefix, int64_t epoch) {
  std::string result = prefix;
  result.push_back('[');
  result.append(std::to_string(epoch));
  result.push_back(']');
  return result;
}
// Returns true if `dataset` or any of its transitive inputs is a tf.data
// service dataset (type string starting with "DataServiceDataset").
// Errors while enumerating inputs are treated as "no service input".
bool HasDataServiceInput(const DatasetBase* dataset) {
  DCHECK(dataset != nullptr);
  if (absl::StartsWith(dataset->type_string(), "DataServiceDataset")) {
    return true;
  }
  std::vector<const DatasetBase*> inputs;
  Status s = dataset->InputDatasets(&inputs);
  if (!s.ok()) {
    return false;
  }
  // Depth-first search through the input graph.
  for (const DatasetBase* input : inputs) {
    if (HasDataServiceInput(input)) {
      return true;
    }
  }
  return false;
}
class RepeatedSplitProvider : public SplitProvider {
public:
explicit RepeatedSplitProvider(std::unique_ptr<SplitProvider> split_provider,
int64_t count)
: split_provider_(std::move(split_provider)), count_(count) {}
int64_t Cardinality() const override {
if (split_provider_->Cardinality() == 0 || count_ == 0) {
return 0;
}
if (count_ < 0) {
return kInfiniteCardinality;
}
if (split_provider_->Cardinality() < 0) {
return split_provider_->Cardinality();
}
return split_provider_->Cardinality() * count_;
}
absl::Status GetNext(Tensor* split, bool* end_of_splits) override {
return split_provider_->GetNext(split, end_of_splits);
}
absl::Status Reset() override { return split_provider_->Reset(); }
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override {
return split_provider_->Save(full_name, writer);
}
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override {
return split_provider_->Restore(full_name, reader);
}
void Cancel() override { split_provider_->Cancel(); }
private:
const std::unique_ptr<SplitProvider> split_provider_;
const int64_t count_;
};
}
class RepeatDatasetOp::Dataset : public DatasetBase {
public:
  // Takes a reference on `input` for the lifetime of this dataset. Random
  // access is supported only for a positive finite count and an input that
  // itself supports it.
  // NOTE(review): `input_->Ref()` runs before the `input_ != nullptr` check
  // below, so that null check can never be the first observer of a null
  // input — presumably `input` is never null here; confirm with callers.
  Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
      : DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
    input_->Ref();
    if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
      random_indexing_compatible_ = input_->RandomIndexingCompatible();
    } else if (count <= 0) {
      // Infinite or empty repeats cannot be randomly indexed.
      random_indexing_compatible_ = absl::FailedPreconditionError(
          absl::StrCat("`repeat(", count,
                       ")` does not support random access of tf.data "
                       "datasets."));
    } else {
      random_indexing_compatible_ = absl::OkStatus();
    }
  }
  ~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<ForeverIterator>(ForeverIterator::Params{
this, name_utils::IteratorPrefix(kForeverRepeat, prefix)});
} else if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyRepeat, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteRepeat, prefix)});
}
}
absl::Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
std::vector<std::unique_ptr<SplitProvider>> input_split_providers;
TF_RETURN_IF_ERROR(input_->MakeSplitProviders(&input_split_providers));
split_providers->clear();
for (auto& split_provider : input_split_providers) {
split_providers->push_back(std::make_unique<RepeatedSplitProvider>(
std::move(split_provider), count_));
}
return absl::OkStatus();
}
  // Output dtypes/shapes are unchanged by repetition: delegate to the input.
  const DataTypeVector& output_dtypes() const override {
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return input_->output_shapes();
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(RepeatDatasetOp::kDatasetType);
  }
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (count_ < 0) {
if (n == 0) {
return 0;
}
return kInfiniteCardinality;
}
if (count_ == 0) {
return 0;
}
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ * n;
}
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
  // Random-access lookup: maps `index` back into the input's range via
  // modulo. Relies on CheckRandomAccessCompatible rejecting cases where the
  // input cardinality could be non-positive — TODO confirm that guarantee.
  Status Get(OpKernelContext* ctx, int64 index,
             std::vector<Tensor>* out_tensors) const override {
    TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
    return input_->Get(ctx, index % input_->Cardinality(), out_tensors);
  }
  // Status computed once in the constructor.
  absl::Status RandomIndexingCompatible() const override {
    return random_indexing_compatible_;
  }
protected:
  // Serializes this dataset as a RepeatDataset node with the input dataset
  // and the scalar count as its two inputs.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* count = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
    TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
    return absl::OkStatus();
  }
private:
  // Iterator for `repeat(0)`: immediately and permanently at end of
  // sequence; there is no state to save or restore.
  class EmptyIterator : public DatasetIterator<Dataset> {
   public:
    explicit EmptyIterator(const Params& params)
        : DatasetIterator<Dataset>(params) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      *end_of_sequence = true;
      return absl::OkStatus();
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       kKnownRatio);
    }
    // No state: save/restore are no-ops.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      return absl::OkStatus();
    }
  };
  // Iterator for `repeat(count)` with positive finite count. Exhausts the
  // input iterator `count` times, rebuilding it (and resetting any split
  // providers) between epochs. `i_` is the current epoch in [0, count_].
  class FiniteIterator : public DatasetIterator<Dataset> {
   public:
    explicit FiniteIterator(const Params& params)
        : DatasetIterator<Dataset>(params), i_(0) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      return dataset()->input_->MakeIterator(
          ctx, this, nested_prefix(prefix(), i_), &input_impl_);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      // A reset input_impl_ means all epochs were already consumed.
      if (!input_impl_) {
        *end_of_sequence = true;
        return absl::OkStatus();
      }
      while (i_ < dataset()->count_) {
        // Wraps ctx so global-shuffle index mapping accounts for the
        // current epoch (see GetIndexMapper).
        IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
        TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
                                                out_tensors, end_of_sequence));
        ctx_with_index_mapper.MergeCheckpoint();
        if (!*end_of_sequence) {
          return absl::OkStatus();
        }
        // Epoch finished: drop its checkpoint state, reset split providers,
        // and rebuild the input iterator for the next epoch.
        ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
        ++i_;
        input_impl_.reset();
        for (const auto& provider : ctx->split_providers()) {
          TF_RETURN_IF_ERROR(provider->Reset());
        }
        TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
            ctx, this, nested_prefix(prefix(), i_), &input_impl_));
      }
      *end_of_sequence = true;
      input_impl_.reset();
      return absl::OkStatus();
    }
    // Maps an element position within the current epoch to a globally
    // shuffled position, then back into the input's index range.
    IndexMapperFn GetIndexMapper(IndexMapperFn parent_index_mapper)
        const override TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      int64_t input_cardinality = dataset()->input_->Cardinality();
      int64_t repeat_count = i_;
      return [parent_index_mapper, input_cardinality,
              repeat_count](size_t element_position) -> absl::StatusOr<size_t> {
        // Positions past the input's range are passed through unchanged.
        if (element_position >= input_cardinality) {
          return element_position;
        }
        size_t repeated_element_position =
            repeat_count * input_cardinality + element_position;
        TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
                            parent_index_mapper(repeated_element_position));
        return shuffled_element_position % input_cardinality;
      };
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       kKnownRatio);
    }
    // Persists the epoch counter, whether the input iterator is live, and
    // (if live) the input iterator's own state.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      }
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      // Global-shuffle path: derive epoch and intra-epoch position from the
      // global element count instead of the saved scalars.
      if (ctx->restored_element_count().has_value()) {
        CardinalityOptions options;
        options.set_compute_level(
            CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
        const int64_t input_cardinality =
            dataset()->input_->Cardinality(std::move(options));
        i_ = *ctx->restored_element_count() / input_cardinality;
        IteratorContext::Params params(ctx);
        params.restored_element_count =
            *ctx->restored_element_count() % input_cardinality;
        params.index_mapper = GetIndexMapper(ctx->index_mapper());
        IteratorContext ctx_with_restored_element_count(params);
        return RestoreInput(&ctx_with_restored_element_count, reader,
                            input_impl_);
      }
      TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
      int64_t input_empty;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
      if (static_cast<bool>(!input_empty)) {
        TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
            ctx, this, nested_prefix(prefix(), i_), &input_impl_));
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
      } else {
        input_impl_.reset();
      }
      return absl::OkStatus();
    }
   private:
    mutex mu_;
    int64_t i_ TF_GUARDED_BY(mu_);             // current repetition epoch
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
  };
  // Iterator for `repeat(-1)` (repeat forever). Rebuilds the input iterator
  // each time it is exhausted. Special case: if the very first call finds an
  // empty input (and there are no split providers and no tf.data service
  // input), it reports end-of-sequence instead of spinning forever.
  class ForeverIterator : public DatasetIterator<Dataset> {
   public:
    explicit ForeverIterator(const Params& params)
        : DatasetIterator<Dataset>(params),
          has_data_service_input_(HasDataServiceInput(dataset())),
          input_impl_(nullptr),
          i_(0),
          first_call_(true) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      return dataset()->input_->MakeIterator(
          ctx, this, nested_prefix(prefix(), i_), &input_impl_);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      do {
        // (Re)create the input iterator for the current epoch if needed.
        if (!input_impl_) {
          TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
              ctx, this, nested_prefix(prefix(), i_), &input_impl_));
        }
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
        // End of sequence must not come with output tensors.
        DCHECK(!*end_of_sequence || out_tensors->empty());
        if (first_call_ && *end_of_sequence && ctx->split_providers().empty()) {
          // Empty input on the first epoch: terminate rather than loop
          // forever — unless a data service input may later produce data.
          if (!has_data_service_input_) {
            input_impl_.reset();
            return absl::OkStatus();
          }
        }
        first_call_ = false;
        if (!*end_of_sequence) {
          return absl::OkStatus();
        }
        // Epoch exhausted: purge its checkpoint, reset split providers, and
        // start the next epoch.
        ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
        ++i_;
        for (const auto& provider : ctx->split_providers()) {
          TF_RETURN_IF_ERROR(provider->Reset());
        }
        input_impl_.reset();
        first_call_ = true;
      } while (true);
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       kKnownRatio);
    }
    // Persists the epoch counter and, when live, the input iterator state.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      }
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
      int64_t input_empty;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
      if (static_cast<bool>(input_empty)) {
        // No live input at save time: next GetNext starts a fresh epoch.
        input_impl_.reset();
        first_call_ = true;
      } else {
        TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
            ctx, this, nested_prefix(prefix(), i_), &input_impl_));
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
        first_call_ = false;
      }
      return absl::OkStatus();
    }
   private:
    const bool has_data_service_input_;  // computed once at construction
    mutex mu_;
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    int64_t i_ TF_GUARDED_BY(mu_);       // current repetition epoch
    bool first_call_ TF_GUARDED_BY(mu_); // true before the epoch's first row
  };
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
RepeatDatasetOp::RepeatDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {}
// Reads the scalar `count` input and wraps `input` in a repeat Dataset.
void RepeatDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                  DatasetBase** output) {
  int64_t count;
  OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
  // Dataset takes its own reference on `input`.
  *output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RepeatDataset").Device(DEVICE_CPU),
RepeatDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "repeat_dataset";
// Test-parameter bundle describing a RepeatDataset: an arbitrary input
// dataset's params plus the repeat `count` and declared output signature.
class RepeatDatasetParams : public DatasetParams {
 public:
  template <typename T>
  RepeatDatasetParams(T input_dataset_params, int64_t count,
                      DataTypeVector output_dtypes,
                      std::vector<PartialTensorShape> output_shapes,
                      string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        count_(count) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The only non-dataset input is the scalar `count` tensor.
  std::vector<Tensor> GetInputTensors() const override {
    return {CreateTensor<int64_t>(TensorShape({}), {count_})};
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->emplace_back(RepeatDatasetOp::kInputDataset);
    input_names->emplace_back(RepeatDatasetOp::kCount);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    attr_vector->clear();
    attr_vector->emplace_back("output_types", output_dtypes_);
    attr_vector->emplace_back("output_shapes", output_shapes_);
    attr_vector->emplace_back("metadata", "");
    return absl::OkStatus();
  }
  string dataset_type() const override { return RepeatDatasetOp::kDatasetType; }
 private:
  int64_t count_;  // repeat count; negative = forever, zero = empty
};
// Test fixture; all shared machinery lives in DatasetOpsTestBase.
class RepeatDatasetOpTest : public DatasetOpsTestBase {};
// Params for repeating a two-row (int64, string) slice dataset twice.
RepeatDatasetParams FiniteRepeatDatasetParams() {
  auto slice_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
       CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
      "tensor_slice");
  return RepeatDatasetParams(std::move(slice_params),
                             /*count=*/2,
                             /*output_dtypes=*/{DT_INT64, DT_STRING},
                             /*output_shapes=*/
                             {PartialTensorShape({2}), PartialTensorShape({1})},
                             /*node_name=*/kNodeName);
}
// Params for repeating the same two-row (int64, string) slice dataset zero
// times, producing an empty dataset.
RepeatDatasetParams EmptyRepeatDatasetParams() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
       CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
      "tensor_slice");
  return RepeatDatasetParams(
      std::move(tensor_slice_dataset_params),
      /*count=*/0,
      /*output_dtypes=*/{DT_INT64, DT_STRING},
      /*output_shapes=*/{PartialTensorShape({2}), PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
// Params for repeating a single-component int64 slice dataset forever
// (count = -1).
RepeatDatasetParams ForeverRepeatDatasetParams() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{2, 1}, {1, 2})},
      "tensor_slice");
  return RepeatDatasetParams(
      std::move(tensor_slice_dataset_params),
      /*count=*/-1,
      // The input has exactly one int64 component of shape {1}, so the
      // declared signature must be {DT_INT64} / {{1}} — matching what this
      // file's DatasetOutputDtypes/Shapes test cases expect for the forever
      // params. The previous {DT_INT64, DT_STRING} / {{2},{1}} declaration
      // contradicted both the input and those expectations.
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
// Expected GetNext outputs: finite repeat yields both rows twice; empty
// repeat yields nothing; forever repeat lists one full pass, which the
// parameterized test cycles through repeatedly.
std::vector<GetNextTestCase<RepeatDatasetParams>> GetNextTestCases() {
  return {{/*dataset_params=*/FiniteRepeatDatasetParams(),
           /*expected_outputs=*/
           {CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
            CreateTensor<tstring>(TensorShape{1}, {"a"}),
            CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
            CreateTensor<tstring>(TensorShape{1}, {"b"}),
            CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
            CreateTensor<tstring>(TensorShape{1}, {"a"}),
            CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
            CreateTensor<tstring>(TensorShape{1}, {"b"})}},
          {/*dataset_params=*/EmptyRepeatDatasetParams(),
           /*expected_outputs=*/{}},
          {/*dataset_params=*/ForeverRepeatDatasetParams(),
           /*expected_outputs=*/
           {CreateTensor<int64_t>(TensorShape{1}, {1}),
            CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorGetNextOpTest
    : public RepeatDatasetOpTest,
      public ::testing::WithParamInterface<
          GetNextTestCase<RepeatDatasetParams>> {};
// Drives GetNext to completion (finite/empty cases) or for 100 iterations
// (infinite case), comparing against the expected outputs.
TEST_P(ParameterizedIteratorGetNextOpTest, GetNext) {
  auto test_case = GetParam();
  TF_ASSERT_OK(Initialize(test_case.dataset_params));
  auto expected_outputs_it = test_case.expected_outputs.begin();
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  if (dataset_->Cardinality() == kInfiniteCardinality) {
    // Infinite dataset: sample a bounded number of elements, wrapping the
    // expected-output iterator each time one pass is consumed.
    for (int i = 0; i < 100; ++i) {
      out_tensors.clear();
      TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
                                      &end_of_sequence));
      for (const auto& tensor : out_tensors) {
        TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
        expected_outputs_it++;
        if (expected_outputs_it == test_case.expected_outputs.end()) {
          expected_outputs_it = test_case.expected_outputs.begin();
        }
      }
    }
    EXPECT_FALSE(end_of_sequence);
  } else {
    // Finite dataset: consume until end and require an exact match with the
    // full expected sequence.
    while (!end_of_sequence) {
      TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
                                      &end_of_sequence));
      if (!end_of_sequence) {
        for (const auto& tensor : out_tensors) {
          EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
          TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
          expected_outputs_it++;
        }
      }
    }
    EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
  }
}
INSTANTIATE_TEST_SUITE_P(RepeatDatasetOpTest,
                         ParameterizedIteratorGetNextOpTest,
                         ::testing::ValuesIn(GetNextTestCases()));
// The dataset node carries the node name supplied via params.
TEST_F(RepeatDatasetOpTest, DatasetNodeName) {
  auto dataset_params = FiniteRepeatDatasetParams();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
// The dataset reports the canonical "Repeat" op type string.
TEST_F(RepeatDatasetOpTest, DatasetTypeString) {
  auto dataset_params = FiniteRepeatDatasetParams();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(RepeatDatasetOp::kDatasetType)));
}
// Repeat preserves the input signature: finite/empty cases expose the
// (int64, string) pair; the forever case exposes the single int64 component.
std::vector<DatasetOutputDtypesTestCase<RepeatDatasetParams>>
DatasetOutputDtypesTestCases() {
  return {{FiniteRepeatDatasetParams(),
           {DT_INT64, DT_STRING}},
          {EmptyRepeatDatasetParams(),
           {DT_INT64, DT_STRING}},
          {ForeverRepeatDatasetParams(),
           {DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                             DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<RepeatDatasetParams>>
DatasetOutputShapesTestCases() {
  return {{FiniteRepeatDatasetParams(),
           {PartialTensorShape({2}),
            PartialTensorShape({1})}},
          {EmptyRepeatDatasetParams(),
           {PartialTensorShape({2}),
            PartialTensorShape({1})}},
          {ForeverRepeatDatasetParams(),
           {PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                             DatasetOutputShapesTestCases())
// Cardinality: 2 rows x 2 repeats = 4; zero repeats = 0; forever = infinite.
std::vector<CardinalityTestCase<RepeatDatasetParams>>
DatasetCardinalityTestCases() {
  return {{FiniteRepeatDatasetParams(), /*expected_cardinality=*/4},
          {EmptyRepeatDatasetParams(), /*expected_cardinality=*/0},
          {ForeverRepeatDatasetParams(),
           /*expected_cardinality=*/kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                           DatasetCardinalityTestCases())
// Iterator-level signature mirrors the dataset-level one above.
std::vector<IteratorOutputDtypesTestCase<RepeatDatasetParams>>
IteratorOutputDtypesTestCases() {
  return {{FiniteRepeatDatasetParams(),
           {DT_INT64, DT_STRING}},
          {EmptyRepeatDatasetParams(),
           {DT_INT64, DT_STRING}},
          {ForeverRepeatDatasetParams(),
           {DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                              IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<RepeatDatasetParams>>
IteratorOutputShapesTestCases() {
  return {{FiniteRepeatDatasetParams(),
           {PartialTensorShape({2}),
            PartialTensorShape({1})}},
          {EmptyRepeatDatasetParams(),
           {PartialTensorShape({2}),
            PartialTensorShape({1})}},
          {ForeverRepeatDatasetParams(),
           {PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                              IteratorOutputShapesTestCases())
// Each repeat mode selects its own iterator class, visible in the prefix:
// "FiniteRepeat", "EmptyRepeat", or "ForeverRepeat".
std::vector<IteratorPrefixTestCase<RepeatDatasetParams>>
IteratorPrefixTestCases() {
  return {
      {FiniteRepeatDatasetParams(),
       name_utils::IteratorPrefix(
           "FiniteRepeat", FiniteRepeatDatasetParams().iterator_prefix())},
      {EmptyRepeatDatasetParams(),
       name_utils::IteratorPrefix(
           "EmptyRepeat", EmptyRepeatDatasetParams().iterator_prefix())},
      {ForeverRepeatDatasetParams(),
       name_utils::IteratorPrefix(
           "ForeverRepeat", ForeverRepeatDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
                       IteratorPrefixTestCases())
// Save/restore cases: checkpoint at element counts 0, 1 and 3 for each
// repeat mode; expected outputs match GetNextTestCases.
std::vector<IteratorSaveAndRestoreTestCase<RepeatDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{/*dataset_params=*/FiniteRepeatDatasetParams(),
           /*breakpoints=*/{0, 1, 3},
           /*expected_outputs=*/
           {CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
            CreateTensor<tstring>(TensorShape{1}, {"a"}),
            CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
            CreateTensor<tstring>(TensorShape{1}, {"b"}),
            CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
            CreateTensor<tstring>(TensorShape{1}, {"a"}),
            CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
            CreateTensor<tstring>(TensorShape{1}, {"b"})}},
          {/*dataset_params=*/EmptyRepeatDatasetParams(),
           /*breakpoints=*/{0, 1, 3},
           /*expected_outputs=*/{}},
          {/*dataset_params=*/ForeverRepeatDatasetParams(),
           /*breakpoints=*/{0, 1, 3},
           /*expected_outputs=*/
           {CreateTensor<int64_t>(TensorShape{1}, {1}),
            CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
    : public RepeatDatasetOpTest,
      public ::testing::WithParamInterface<
          IteratorSaveAndRestoreTestCase<RepeatDatasetParams>> {};
// At each breakpoint: save the iterator, restore from the checkpoint, then
// advance to the breakpoint's element count comparing outputs along the way.
TEST_P(ParameterizedIteratorSaveAndRestoreTest, Roundtrip) {
  auto test_case = GetParam();
  TF_ASSERT_OK(Initialize(test_case.dataset_params));
  std::unique_ptr<SerializationContext> serialization_ctx;
  TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
  auto expected_outputs_it = test_case.expected_outputs.begin();
  // An empty dataset is at end-of-sequence before the first GetNext.
  bool end_of_sequence = dataset_->Cardinality() == 0;
  std::vector<Tensor> out_tensors;
  int cur_iteration = 0;
  std::vector<int> breakpoints = GetParam().breakpoints;
  for (int breakpoint : breakpoints) {
    // Round-trip the iterator state through a VariantTensorData writer and
    // reader before continuing iteration.
    VariantTensorDataWriter writer;
    TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
    std::vector<const VariantTensorData*> data;
    writer.GetData(&data);
    VariantTensorDataReader reader(data);
    TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
                                 test_case.dataset_params.iterator_prefix(),
                                 *dataset_, &iterator_));
    while (cur_iteration < breakpoint) {
      out_tensors.clear();
      TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
                                      &end_of_sequence));
      if (!end_of_sequence) {
        for (auto& tensor : out_tensors) {
          EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
          TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
          expected_outputs_it++;
        }
      }
      cur_iteration++;
      // Infinite case: wrap the expected outputs after each full pass.
      if (dataset_->Cardinality() == kInfiniteCardinality &&
          expected_outputs_it == test_case.expected_outputs.end()) {
        expected_outputs_it = test_case.expected_outputs.begin();
      }
    }
    if (breakpoint >= dataset_->Cardinality()) {
      if (dataset_->Cardinality() == kInfiniteCardinality) {
        EXPECT_FALSE(end_of_sequence);
      } else {
        EXPECT_TRUE(end_of_sequence);
        EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
      }
    } else {
      EXPECT_FALSE(end_of_sequence);
    }
  }
}
INSTANTIATE_TEST_SUITE_P(
    RepeatDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
    ::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} |
28 | #ifndef QUICHE_QUIC_QBONE_PLATFORM_RTNETLINK_MESSAGE_H_
#define QUICHE_QUIC_QBONE_PLATFORM_RTNETLINK_MESSAGE_H_
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <memory>
#include <vector>
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
// Base class for rtnetlink request messages. Owns a list of heap-allocated
// buffers — the netlink header (+ payload header) first, followed by one
// buffer per appended attribute — exposed as an iovec array for sendmsg().
class RtnetlinkMessage {
 public:
  virtual ~RtnetlinkMessage();
  // The CRUD operation a concrete message performs.
  enum class Operation {
    NEW,
    DEL,
    GET,
  };
  // Appends an rtattr (type + data) as a new buffer and grows nlmsg_len.
  virtual void AppendAttribute(uint16_t type, const void* data,
                               uint16_t data_length);
  // Copies the internal buffers into a caller-owned iovec array of
  // IoVecSize() entries; the underlying buffers remain owned by this object.
  std::unique_ptr<struct iovec[]> BuildIoVec() const;
  size_t IoVecSize() const;
 protected:
  // Builds the netlink header and copies `payload_header` (if non-null)
  // into the message payload.
  RtnetlinkMessage(uint16_t type, uint16_t flags, uint32_t seq, uint32_t pid,
                   const void* payload_header, size_t payload_header_length);
  // Adds `additional_data_length` to the header's nlmsg_len.
  void AdjustMessageLength(size_t additional_data_length);
 private:
  struct nlmsghdr* MessageHeader();
  // First entry is the header buffer; subsequent entries are attributes.
  std::vector<struct iovec> message_;
};
// RTM_*LINK request (interface create/delete/query).
class LinkMessage : public RtnetlinkMessage {
 public:
  static LinkMessage New(RtnetlinkMessage::Operation request_operation,
                         uint16_t flags, uint32_t seq, uint32_t pid,
                         const struct ifinfomsg* interface_info_header);
 private:
  using RtnetlinkMessage::RtnetlinkMessage;
};
// RTM_*ADDR request (interface address management).
class AddressMessage : public RtnetlinkMessage {
 public:
  static AddressMessage New(RtnetlinkMessage::Operation request_operation,
                            uint16_t flags, uint32_t seq, uint32_t pid,
                            const struct ifaddrmsg* interface_address_header);
 private:
  using RtnetlinkMessage::RtnetlinkMessage;
};
// RTM_*ROUTE request (routing table management).
class RouteMessage : public RtnetlinkMessage {
 public:
  static RouteMessage New(RtnetlinkMessage::Operation request_operation,
                          uint16_t flags, uint32_t seq, uint32_t pid,
                          const struct rtmsg* route_message_header);
 private:
  using RtnetlinkMessage::RtnetlinkMessage;
};
// RTM_*RULE request (policy routing rules); shares rtmsg with routes.
class RuleMessage : public RtnetlinkMessage {
 public:
  static RuleMessage New(RtnetlinkMessage::Operation request_operation,
                         uint16_t flags, uint32_t seq, uint32_t pid,
                         const struct rtmsg* rule_message_header);
 private:
  using RtnetlinkMessage::RtnetlinkMessage;
};
}
#endif
#include "quiche/quic/qbone/platform/rtnetlink_message.h"
#include <memory>
#include <utility>
namespace quic {
// Allocates one zeroed buffer sized by NLMSG_SPACE (header + aligned
// payload), fills in the netlink header fields, and copies the optional
// payload header right after the nlmsghdr. The buffer is owned by message_
// and freed in the destructor.
RtnetlinkMessage::RtnetlinkMessage(uint16_t type, uint16_t flags, uint32_t seq,
                                   uint32_t pid, const void* payload_header,
                                   size_t payload_header_length) {
  auto* buf = new uint8_t[NLMSG_SPACE(payload_header_length)];
  memset(buf, 0, NLMSG_SPACE(payload_header_length));
  auto* message_header = reinterpret_cast<struct nlmsghdr*>(buf);
  // nlmsg_len counts header + payload (unpadded); NLMSG_SPACE pads the
  // allocation to alignment.
  message_header->nlmsg_len = NLMSG_LENGTH(payload_header_length);
  message_header->nlmsg_type = type;
  message_header->nlmsg_flags = flags;
  message_header->nlmsg_seq = seq;
  message_header->nlmsg_pid = pid;
  if (payload_header != nullptr) {
    memcpy(NLMSG_DATA(message_header), payload_header, payload_header_length);
  }
  message_.push_back({buf, NLMSG_SPACE(payload_header_length)});
}
// Frees every buffer registered in message_ (header and attributes alike);
// each was allocated with new[] in the ctor or AppendAttribute.
RtnetlinkMessage::~RtnetlinkMessage() {
  for (const auto& iov : message_) {
    delete[] reinterpret_cast<uint8_t*>(iov.iov_base);
  }
}
// Appends one rtattr (type + data) as a new iovec entry and bumps the
// netlink header's nlmsg_len accordingly.
void RtnetlinkMessage::AppendAttribute(uint16_t type, const void* data,
                                       uint16_t data_length) {
  const size_t attribute_space = RTA_SPACE(data_length);
  auto* buffer = new uint8_t[attribute_space];
  memset(buffer, 0, attribute_space);
  auto* attribute = reinterpret_cast<struct rtattr*>(buffer);
  static_assert(sizeof(uint16_t) == sizeof(attribute->rta_len),
                "struct rtattr uses unsigned short, it's no longer 16bits");
  static_assert(sizeof(uint16_t) == sizeof(attribute->rta_type),
                "struct rtattr uses unsigned short, it's no longer 16bits");
  attribute->rta_len = RTA_LENGTH(data_length);
  attribute->rta_type = type;
  memcpy(RTA_DATA(attribute), data, data_length);
  message_.push_back({buffer, attribute_space});
  AdjustMessageLength(attribute->rta_len);
}
std::unique_ptr<struct iovec[]> RtnetlinkMessage::BuildIoVec() const {
auto message = std::make_unique<struct iovec[]>(message_.size());
int idx = 0;
for (const auto& vec : message_) {
message[idx++] = vec;
}
return message;
}
// Number of iovec entries: the header block plus one per appended attribute.
size_t RtnetlinkMessage::IoVecSize() const { return message_.size(); }
// Grows nlmsg_len to cover additional_data_length more bytes, aligning the
// current length first so the new data starts on a netlink boundary.
void RtnetlinkMessage::AdjustMessageLength(size_t additional_data_length) {
  MessageHeader()->nlmsg_len =
      NLMSG_ALIGN(MessageHeader()->nlmsg_len) + additional_data_length;
}
// The netlink header always lives at the start of the first buffer (it is
// written there by the constructor).
struct nlmsghdr* RtnetlinkMessage::MessageHeader() {
  return reinterpret_cast<struct nlmsghdr*>(message_[0].iov_base);
}
// Maps the generic operation onto the RTM_*LINK message type. GET requests
// carry a minimal rtgenmsg dump header instead of the caller's ifinfomsg.
LinkMessage LinkMessage::New(RtnetlinkMessage::Operation request_operation,
                             uint16_t flags, uint32_t seq, uint32_t pid,
                             const struct ifinfomsg* interface_info_header) {
  uint16_t request_type;
  switch (request_operation) {
    case RtnetlinkMessage::Operation::NEW:
      request_type = RTM_NEWLINK;
      break;
    case RtnetlinkMessage::Operation::DEL:
      request_type = RTM_DELLINK;
      break;
    case RtnetlinkMessage::Operation::GET:
      request_type = RTM_GETLINK;
      break;
  }
  if (request_type == RTM_GETLINK) {
    struct rtgenmsg dump_header = {AF_UNSPEC};
    return LinkMessage(request_type, flags, seq, pid, &dump_header,
                       sizeof(dump_header));
  }
  return LinkMessage(request_type, flags, seq, pid, interface_info_header,
                     sizeof(struct ifinfomsg));
}
// Maps the generic operation onto the RTM_*ADDR message type. GET requests
// carry a minimal rtgenmsg dump header instead of the caller's ifaddrmsg.
AddressMessage AddressMessage::New(
    RtnetlinkMessage::Operation request_operation, uint16_t flags, uint32_t seq,
    uint32_t pid, const struct ifaddrmsg* interface_address_header) {
  uint16_t request_type;
  switch (request_operation) {
    case RtnetlinkMessage::Operation::NEW:
      request_type = RTM_NEWADDR;
      break;
    case RtnetlinkMessage::Operation::DEL:
      request_type = RTM_DELADDR;
      break;
    case RtnetlinkMessage::Operation::GET:
      request_type = RTM_GETADDR;
      break;
  }
  if (request_type == RTM_GETADDR) {
    struct rtgenmsg dump_header = {AF_UNSPEC};
    return AddressMessage(request_type, flags, seq, pid, &dump_header,
                          sizeof(dump_header));
  }
  return AddressMessage(request_type, flags, seq, pid,
                        interface_address_header, sizeof(struct ifaddrmsg));
}
// Maps the generic operation onto the RTM_*ROUTE message type; unlike link
// and address messages, GET also uses the caller-provided rtmsg header.
RouteMessage RouteMessage::New(RtnetlinkMessage::Operation request_operation,
                               uint16_t flags, uint32_t seq, uint32_t pid,
                               const struct rtmsg* route_message_header) {
  uint16_t request_type;
  if (request_operation == RtnetlinkMessage::Operation::NEW) {
    request_type = RTM_NEWROUTE;
  } else if (request_operation == RtnetlinkMessage::Operation::DEL) {
    request_type = RTM_DELROUTE;
  } else {
    request_type = RTM_GETROUTE;
  }
  return RouteMessage(request_type, flags, seq, pid, route_message_header,
                      sizeof(struct rtmsg));
}
// Maps the generic operation onto the RTM_*RULE message type. Rule messages
// reuse the rtmsg header layout, so the caller-provided header is always
// copied in (including for GET).
RuleMessage RuleMessage::New(RtnetlinkMessage::Operation request_operation,
                             uint16_t flags, uint32_t seq, uint32_t pid,
                             const struct rtmsg* rule_message_header) {
  uint16_t request_type;
  switch (request_operation) {
    case RtnetlinkMessage::Operation::NEW:
      request_type = RTM_NEWRULE;
      break;
    case RtnetlinkMessage::Operation::DEL:
      request_type = RTM_DELRULE;
      break;
    case RtnetlinkMessage::Operation::GET:
      request_type = RTM_GETRULE;
      break;
  }
  // sizeof(struct rtmsg) for consistency with RouteMessage::New; the value
  // is identical, only the spelling differed.
  return RuleMessage(request_type, flags, seq, pid, rule_message_header,
                     sizeof(struct rtmsg));
}
} | #include "quiche/quic/qbone/platform/rtnetlink_message.h"
#include <net/if_arp.h>
#include <string>
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
using ::testing::StrEq;
// A GET link message must consist of a single iovec holding a netlink
// header followed by a generic rtgenmsg dump header; the caller-supplied
// ifinfomsg (nullptr here) must be ignored.
TEST(RtnetlinkMessageTest, LinkMessageCanBeCreatedForGetOperation) {
  uint16_t flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
  uint32_t seq = 42;
  uint32_t pid = 7;
  auto message = LinkMessage::New(RtnetlinkMessage::Operation::GET, flags, seq,
                                  pid, nullptr);
  EXPECT_EQ(1, message.IoVecSize());
  auto iov = message.BuildIoVec();
  EXPECT_EQ(NLMSG_SPACE(sizeof(struct rtgenmsg)), iov[0].iov_len);
  // Every nlmsghdr field must round-trip through the builder.
  auto* netlink_message = reinterpret_cast<struct nlmsghdr*>(iov[0].iov_base);
  EXPECT_EQ(NLMSG_LENGTH(sizeof(struct rtgenmsg)), netlink_message->nlmsg_len);
  EXPECT_EQ(RTM_GETLINK, netlink_message->nlmsg_type);
  EXPECT_EQ(flags, netlink_message->nlmsg_flags);
  EXPECT_EQ(seq, netlink_message->nlmsg_seq);
  EXPECT_EQ(pid, netlink_message->nlmsg_pid);
  EXPECT_EQ(NLMSG_LENGTH(sizeof(struct rtgenmsg)), netlink_message->nlmsg_len);
}
TEST(RtnetlinkMessageTest, LinkMessageCanBeCreatedForNewOperation) {
struct ifinfomsg interface_info_header = {AF_INET, 0, ARPHRD_TUNNEL,
3, 0, 0xffffffff};
uint16_t flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
uint32_t seq = 42;
uint32_t pid = 7;
auto message = LinkMessage::New(RtnetlinkMessage::Operation::NEW, flags, seq,
pid, &interface_info_header);
std::string device_name = "device0";
message.AppendAttribute(IFLA_IFNAME, device_name.c_str(), device_name.size());
EXPECT_EQ(2, message.IoVecSize());
auto iov = message.BuildIoVec();
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct ifinfomsg))),
iov[0].iov_len);
auto* netlink_message = reinterpret_cast<struct nlmsghdr*>(iov[0].iov_base);
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct ifinfomsg))) +
RTA_LENGTH(device_name.size()),
netlink_message->nlmsg_len);
EXPECT_EQ(RTM_NEWLINK, netlink_message->nlmsg_type);
EXPECT_EQ(flags, netlink_message->nlmsg_flags);
EXPECT_EQ(seq, netlink_message->nlmsg_seq);
EXPECT_EQ(pid, netlink_message->nlmsg_pid);
auto* parsed_header =
reinterpret_cast<struct ifinfomsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(interface_info_header.ifi_family, parsed_header->ifi_family);
EXPECT_EQ(interface_info_header.ifi_type, parsed_header->ifi_type);
EXPECT_EQ(interface_info_header.ifi_index, parsed_header->ifi_index);
EXPECT_EQ(interface_info_header.ifi_flags, parsed_header->ifi_flags);
EXPECT_EQ(interface_info_header.ifi_change, parsed_header->ifi_change);
EXPECT_EQ(RTA_SPACE(device_name.size()), iov[1].iov_len);
auto* rta = reinterpret_cast<struct rtattr*>(iov[1].iov_base);
EXPECT_EQ(IFLA_IFNAME, rta->rta_type);
EXPECT_EQ(RTA_LENGTH(device_name.size()), rta->rta_len);
EXPECT_THAT(device_name,
StrEq(std::string(reinterpret_cast<char*>(RTA_DATA(rta)),
RTA_PAYLOAD(rta))));
}
TEST(RtnetlinkMessageTest, AddressMessageCanBeCreatedForGetOperation) {
uint16_t flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
uint32_t seq = 42;
uint32_t pid = 7;
auto message = AddressMessage::New(RtnetlinkMessage::Operation::GET, flags,
seq, pid, nullptr);
EXPECT_EQ(1, message.IoVecSize());
auto iov = message.BuildIoVec();
EXPECT_EQ(NLMSG_SPACE(sizeof(struct rtgenmsg)), iov[0].iov_len);
auto* netlink_message = reinterpret_cast<struct nlmsghdr*>(iov[0].iov_base);
EXPECT_EQ(NLMSG_LENGTH(sizeof(struct rtgenmsg)), netlink_message->nlmsg_len);
EXPECT_EQ(RTM_GETADDR, netlink_message->nlmsg_type);
EXPECT_EQ(flags, netlink_message->nlmsg_flags);
EXPECT_EQ(seq, netlink_message->nlmsg_seq);
EXPECT_EQ(pid, netlink_message->nlmsg_pid);
EXPECT_EQ(NLMSG_LENGTH(sizeof(struct rtgenmsg)), netlink_message->nlmsg_len);
}
TEST(RtnetlinkMessageTest, AddressMessageCanBeCreatedForNewOperation) {
struct ifaddrmsg interface_address_header = {AF_INET,
24,
0,
RT_SCOPE_LINK,
4};
uint16_t flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
uint32_t seq = 42;
uint32_t pid = 7;
auto message = AddressMessage::New(RtnetlinkMessage::Operation::NEW, flags,
seq, pid, &interface_address_header);
QuicIpAddress ip;
QUICHE_CHECK(ip.FromString("10.0.100.3"));
message.AppendAttribute(IFA_ADDRESS, ip.ToPackedString().c_str(),
ip.ToPackedString().size());
EXPECT_EQ(2, message.IoVecSize());
auto iov = message.BuildIoVec();
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct ifaddrmsg))),
iov[0].iov_len);
auto* netlink_message = reinterpret_cast<struct nlmsghdr*>(iov[0].iov_base);
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct ifaddrmsg))) +
RTA_LENGTH(ip.ToPackedString().size()),
netlink_message->nlmsg_len);
EXPECT_EQ(RTM_NEWADDR, netlink_message->nlmsg_type);
EXPECT_EQ(flags, netlink_message->nlmsg_flags);
EXPECT_EQ(seq, netlink_message->nlmsg_seq);
EXPECT_EQ(pid, netlink_message->nlmsg_pid);
auto* parsed_header =
reinterpret_cast<struct ifaddrmsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(interface_address_header.ifa_family, parsed_header->ifa_family);
EXPECT_EQ(interface_address_header.ifa_prefixlen,
parsed_header->ifa_prefixlen);
EXPECT_EQ(interface_address_header.ifa_flags, parsed_header->ifa_flags);
EXPECT_EQ(interface_address_header.ifa_scope, parsed_header->ifa_scope);
EXPECT_EQ(interface_address_header.ifa_index, parsed_header->ifa_index);
EXPECT_EQ(RTA_SPACE(ip.ToPackedString().size()), iov[1].iov_len);
auto* rta = reinterpret_cast<struct rtattr*>(iov[1].iov_base);
EXPECT_EQ(IFA_ADDRESS, rta->rta_type);
EXPECT_EQ(RTA_LENGTH(ip.ToPackedString().size()), rta->rta_len);
EXPECT_THAT(ip.ToPackedString(),
StrEq(std::string(reinterpret_cast<char*>(RTA_DATA(rta)),
RTA_PAYLOAD(rta))));
}
TEST(RtnetlinkMessageTest, RouteMessageCanBeCreatedFromNewOperation) {
struct rtmsg route_message_header = {AF_INET6,
48,
0,
0,
RT_TABLE_MAIN,
RTPROT_STATIC,
RT_SCOPE_LINK,
RTN_LOCAL,
0};
uint16_t flags = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH;
uint32_t seq = 42;
uint32_t pid = 7;
auto message = RouteMessage::New(RtnetlinkMessage::Operation::NEW, flags, seq,
pid, &route_message_header);
QuicIpAddress preferred_source;
QUICHE_CHECK(preferred_source.FromString("ff80::1"));
message.AppendAttribute(RTA_PREFSRC,
preferred_source.ToPackedString().c_str(),
preferred_source.ToPackedString().size());
EXPECT_EQ(2, message.IoVecSize());
auto iov = message.BuildIoVec();
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct rtmsg))), iov[0].iov_len);
auto* netlink_message = reinterpret_cast<struct nlmsghdr*>(iov[0].iov_base);
EXPECT_EQ(NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct rtmsg))) +
RTA_LENGTH(preferred_source.ToPackedString().size()),
netlink_message->nlmsg_len);
EXPECT_EQ(RTM_NEWROUTE, netlink_message->nlmsg_type);
EXPECT_EQ(flags, netlink_message->nlmsg_flags);
EXPECT_EQ(seq, netlink_message->nlmsg_seq);
EXPECT_EQ(pid, netlink_message->nlmsg_pid);
auto* parsed_header =
reinterpret_cast<struct rtmsg*>(NLMSG_DATA(netlink_message));
EXPECT_EQ(route_message_header.rtm_family, parsed_header->rtm_family);
EXPECT_EQ(route_message_header.rtm_dst_len, parsed_header->rtm_dst_len);
EXPECT_EQ(route_message_header.rtm_src_len, parsed_header->rtm_src_len);
EXPECT_EQ(route_message_header.rtm_tos, parsed_header->rtm_tos);
EXPECT_EQ(route_message_header.rtm_table, parsed_header->rtm_table);
EXPECT_EQ(route_message_header.rtm_protocol, parsed_header->rtm_protocol);
EXPECT_EQ(route_message_header.rtm_scope, parsed_header->rtm_scope);
EXPECT_EQ(route_message_header.rtm_type, parsed_header->rtm_type);
EXPECT_EQ(route_message_header.rtm_flags, parsed_header->rtm_flags);
EXPECT_EQ(RTA_SPACE(preferred_source.ToPackedString().size()),
iov[1].iov_len);
auto* rta = reinterpret_cast<struct rtattr*>(iov[1].iov_base);
EXPECT_EQ(RTA_PREFSRC, rta->rta_type);
EXPECT_EQ(RTA_LENGTH(preferred_source.ToPackedString().size()), rta->rta_len);
EXPECT_THAT(preferred_source.ToPackedString(),
StrEq(std::string(reinterpret_cast<char*>(RTA_DATA(rta)),
RTA_PAYLOAD(rta))));
}
}
} |
29 | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_SAVE_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_SAVE_H_
#include <string>
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace libexport {
TF_EXPORT Status Save(const std::string& export_dir);
}
}
#endif
#include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace libexport {
// Creates `export_dir` (including missing parent directories). Creating the
// directory tree is currently the only step of the export.
Status Save(const std::string& export_dir) {
  Env* const env = Env::Default();
  TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(export_dir));
  return absl::OkStatus();
}
}
} | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
// Save() should create the export directory when it does not yet exist.
TEST(SaveTest, TestDirectoryStructure) {
  const string base_dir = tensorflow::io::JoinPath(
      tensorflow::testing::TmpDir(), "test_directory_structure");
  TF_ASSERT_OK(Save(base_dir));
  TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir));
}
}
}
} |
30 | #ifndef XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_CONVOLUTION_THUNK_H_
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
// Thunk that executes a GPU convolution described by a GpuConvConfig.
// One GenericConvRunner is cached per stream, with the cache guarded by a
// mutex so concurrent ExecuteOnStream calls are safe.
class ConvolutionThunk : public Thunk {
 public:
  // Slices identify the operand/result/scratch buffers within the
  // executable's buffer assignment; addresses are resolved at execute time.
  ConvolutionThunk(ThunkInfo thunk_info, GpuConvConfig config,
                   std::vector<BufferAllocation::Slice> operand_slices,
                   std::vector<BufferAllocation::Slice> result_slices,
                   BufferAllocation::Slice scratch_slice);
  ConvolutionThunk(const ConvolutionThunk&) = delete;
  ConvolutionThunk& operator=(const ConvolutionThunk&) = delete;
  absl::Status ExecuteOnStream(const ExecuteParams& params) override;
 private:
  std::vector<BufferAllocation::Slice> operand_buffers_;
  std::vector<BufferAllocation::Slice> result_buffers_;
  BufferAllocation::Slice scratch_buffer_;
  // Returns the cached runner for `stream`, creating it on first use;
  // *runner_created reports whether a new runner was made.
  GenericConvRunner& GetOrCreateRunner(const stream_executor::Stream* stream,
                                       bool* runner_created);
  const GpuConvConfig config_;
  absl::Mutex mu_;
  absl::flat_hash_map<const stream_executor::Stream*,
                      std::unique_ptr<GenericConvRunner>>
      runner_cache_ ABSL_GUARDED_BY(mu_);
};
// Thunk that reorders an int8 convolution filter (and optional bias) into
// the layout required by cuDNN's reordered-filter path (kOutputInputYX32).
class ConvolutionReorderThunk : public Thunk {
 public:
  // filter_nchw holds the filter dimensions as [output features, input
  // features, height, width]; operand/result pair 1 (if present) is the bias.
  ConvolutionReorderThunk(
      ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
      absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
      absl::InlinedVector<BufferAllocation::Slice, 2> result_slices);
  ConvolutionReorderThunk(const ConvolutionReorderThunk&) = delete;
  ConvolutionReorderThunk& operator=(const ConvolutionReorderThunk&) = delete;
  absl::Status ExecuteOnStream(const ExecuteParams& params) override;
 private:
  // Builds the dnn::FilterDescriptor from the 4-element filter_nchw span.
  static se::dnn::FilterDescriptor CreateFilterDescriptor(
      absl::Span<int64_t> filter_nchw);
  const se::dnn::FilterDescriptor filter_descriptor_;
  absl::InlinedVector<BufferAllocation::Slice, 2> operand_buffers_;
  absl::InlinedVector<BufferAllocation::Slice, 2> result_buffers_;
};
}
}
#endif
#include "xla/service/gpu/runtime/convolution_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#if TENSORFLOW_USE_ROCM
#include "xla/service/gpu/stream_executor_util.h"
#endif
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
// Captures the convolution config and buffer slices. No GPU work happens
// here; addresses are resolved and the convolution launched in
// ExecuteOnStream().
ConvolutionThunk::ConvolutionThunk(
    ThunkInfo thunk_info, GpuConvConfig config,
    std::vector<BufferAllocation::Slice> operand_slices,
    std::vector<BufferAllocation::Slice> result_slices,
    BufferAllocation::Slice scratch_slice)
    : Thunk(Kind::kConvolution, thunk_info),
      operand_buffers_(std::move(operand_slices)),
      result_buffers_(std::move(result_slices)),
      scratch_buffer_(scratch_slice),
      config_(std::move(config)) {}
// Returns the cached GenericConvRunner for `stream`, creating one on first
// use. Sets *runner_created to true iff a new runner was inserted.
// Thread-safe: the cache is guarded by mu_.
GenericConvRunner& ConvolutionThunk::GetOrCreateRunner(
    const stream_executor::Stream* stream, bool* runner_created) {
  absl::MutexLock lock(&mu_);
  // try_emplace does a single hash lookup, unlike the find-then-insert
  // pattern which probes the table twice on a miss.
  auto [it, inserted] = runner_cache_.try_emplace(stream);
  if (inserted) {
    it->second = std::make_unique<GenericConvRunner>(config_);
  }
  *runner_created = inserted;
  return *it->second;
}
// Resolves the operand/result/scratch slices to device addresses and runs
// the convolution on params.stream via RunGpuConv.
absl::Status ConvolutionThunk::ExecuteOnStream(const ExecuteParams& params) {
  const auto& buffer_allocations = *params.buffer_allocations;
  // Translate buffer slices into concrete device memory handles.
  std::vector<se::DeviceMemoryBase> operand_se_buffers, result_se_buffers;
  operand_se_buffers.reserve(operand_buffers_.size());
  for (BufferAllocation::Slice buffer : operand_buffers_) {
    operand_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
  }
  result_se_buffers.reserve(result_buffers_.size());
  for (BufferAllocation::Slice buffer : result_buffers_) {
    result_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
  }
  se::DeviceMemoryBase scratch =
      buffer_allocations.GetDeviceAddress(scratch_buffer_);
  bool runner_created = false;
  RunConvOptions opts;
  // Each stream uses its own cached runner (see GetOrCreateRunner).
  opts.runner_cache = &GetOrCreateRunner(params.stream, &runner_created);
#if TENSORFLOW_USE_ROCM
  // On ROCm, the first execution on a given stream queries MIOpen for
  // convolution algorithms matching this thunk's parameters.
  if (runner_created) {
    TF_ASSIGN_OR_RETURN(
        GpuConvParams conv_params,
        GetGpuConvParams(config_, operand_se_buffers, result_se_buffers));
    TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
                        GetDNNConvKindFromCudnnConvKind(config_.kind));
    TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
                        GetDNNDataTypeFromPrimitiveType(config_.input_type));
    TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
                        GetDNNDataTypeFromPrimitiveType(config_.output_type));
    TF_ASSIGN_OR_RETURN(auto dnn,
                        se::dnn::internal::GetDnnFromStream(params.stream));
    se::OwningScratchAllocator<> scratch_allocator(
        buffer_allocations.device_ordinal(),
        buffer_allocations.memory_allocator());
    std::vector<se::dnn::ProfileResult> profile_results;
    dnn->GetMIOpenConvolveAlgorithms(
        kind, input_type, output_type, params.stream, config_.input_descriptor,
        conv_params.input_buf, config_.filter_descriptor,
        conv_params.filter_buf, config_.output_descriptor,
        conv_params.output_buf, config_.conv_desc, &scratch_allocator,
        &profile_results);
  }
#endif
  TF_RETURN_IF_ERROR(RunGpuConv(config_, absl::MakeSpan(operand_se_buffers),
                                absl::MakeSpan(result_se_buffers), scratch,
                                params.stream, opts));
  // Surface any asynchronous stream error as an Internal status.
  if (!params.stream->ok()) {
    return Internal("ConvolutionThunk::ExecuteOnStream failed.");
  }
  return absl::OkStatus();
}
// Builds the filter descriptor eagerly from filter_nchw (must be exactly
// 4 elements, see CreateFilterDescriptor) and stores the buffer slices.
ConvolutionReorderThunk::ConvolutionReorderThunk(
    ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
    absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
    absl::InlinedVector<BufferAllocation::Slice, 2> result_slices)
    : Thunk(Kind::kConvolutionReorder, thunk_info),
      filter_descriptor_(CreateFilterDescriptor(filter_nchw)),
      // The vectors are taken by value; move them instead of copying.
      operand_buffers_(std::move(operand_slices)),
      result_buffers_(std::move(result_slices)) {}
// Reorders the int8 filter (and, when present, the float bias) via
// CudnnReorderConvolutionFilterAndBias on params.stream.
absl::Status ConvolutionReorderThunk::ExecuteOnStream(
    const ExecuteParams& params) {
  // A second operand/result pair, when present, is the bias.
  bool has_bias = operand_buffers_.size() > 1;
  CHECK_EQ(operand_buffers_.size(), result_buffers_.size());
  const auto& buffer_allocations = *params.buffer_allocations;
  auto filter_input = se::DeviceMemory<int8_t>(
      buffer_allocations.GetDeviceAddress(operand_buffers_[0]));
  auto filter_output = se::DeviceMemory<int8_t>(
      buffer_allocations.GetDeviceAddress(result_buffers_[0]));
  auto bias_input =
      has_bias ? std::make_optional(se::DeviceMemory<float>(
                     buffer_allocations.GetDeviceAddress(operand_buffers_[1])))
               : std::nullopt;
  auto bias_output =
      has_bias ? std::make_optional(se::DeviceMemory<float>(
                     buffer_allocations.GetDeviceAddress(result_buffers_[1])))
               : std::nullopt;
  auto dnn = params.stream->parent()->AsDnn();
  if (dnn == nullptr) {
    return absl::InternalError("No DNN for stream.");
  }
  return dnn->CudnnReorderConvolutionFilterAndBias(
      params.stream, filter_descriptor_, filter_input, &filter_output,
      std::move(bias_input), std::move(bias_output));
}
// Builds a 2D filter descriptor from [output features, input features,
// height, width], using the kOutputInputYX32 layout expected by the reorder
// performed in ExecuteOnStream.
se::dnn::FilterDescriptor ConvolutionReorderThunk::CreateFilterDescriptor(
    absl::Span<int64_t> filter_nchw) {
  CHECK_EQ(filter_nchw.size(), 4);
  se::dnn::FilterDescriptor filter_desc(2);
  filter_desc.set_layout(se::dnn::FilterLayout::kOutputInputYX32);
  filter_desc.set_output_feature_map_count(filter_nchw[0]);
  filter_desc.set_input_feature_map_count(filter_nchw[1]);
  filter_desc.set_input_filter_height(filter_nchw[2]);
  filter_desc.set_input_filter_width(filter_nchw[3]);
  return filter_desc;
}
}
} | #include "xla/service/cpu/runtime/convolution_thunk.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "Eigen/Core"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
struct ConvolutionDimensions {
int batch_size = 1;
int input_size = 3;
int input_channels = 5;
int kernel_size = 3;
int output_channels = 3;
int output_size = input_size - kernel_size + 1;
};
template <typename T>
class ConvolutionThunkTypedTest : public ::testing::Test {};
using CorrectTypes = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(ConvolutionThunkTypedTest, CorrectTypes);
std::vector<int64_t> MakeInputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> input_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
input_dims.push_back(dims.input_size);
}
input_dims.push_back(dims.input_channels);
return input_dims;
}
std::vector<int64_t> MakeKernelDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> kernel_dims = {};
for (int i = 0; i < convolution_rank; ++i) {
kernel_dims.push_back(dims.kernel_size);
}
kernel_dims.push_back(dims.input_channels);
kernel_dims.push_back(dims.output_channels);
return kernel_dims;
}
std::vector<int64_t> MakeOutputDims(
int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> output_dims = {dims.batch_size};
for (int i = 0; i < convolution_rank; ++i) {
output_dims.push_back(dims.output_size);
}
output_dims.push_back(dims.output_channels);
return output_dims;
}
template <typename ElementType>
std::vector<ElementType> MakeDataVector(const std::vector<int64_t>& dims) {
auto size = absl::c_accumulate(dims, 1, std::multiplies<int>());
return std::vector<ElementType>(size, ElementType(0.0));
}
template <typename ElementType>
std::vector<MaybeOwningDeviceMemory> MakeBuffers(
const std::vector<ElementType>& input,
const std::vector<ElementType>& kernel,
const std::vector<ElementType>& output) {
std::vector<MaybeOwningDeviceMemory> buffers;
size_t input_size_in_bytes = input.size() * sizeof(ElementType);
buffers.emplace_back(se::DeviceMemoryBase(input.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(kernel.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(output.data(), output_size_in_bytes));
return buffers;
}
ConvolutionThunk::Options MakeConvolutionOptions() {
ConvolutionThunk::Options options;
options.multi_threaded = false;
options.use_acl = false;
return options;
}
ConvolutionDimensionNumbers MakeConvolutionDimensionNumbers(
int convolution_rank) {
ConvolutionDimensionNumbers dnums;
int dim = 0;
dnums.set_input_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_input_spatial_dimensions(dim++);
}
dnums.set_input_feature_dimension(dim++);
dim = 0;
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_kernel_spatial_dimensions(dim++);
}
dnums.set_kernel_input_feature_dimension(dim++);
dnums.set_kernel_output_feature_dimension(dim++);
dim = 0;
dnums.set_output_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_output_spatial_dimensions(dim++);
}
dnums.set_output_feature_dimension(dim++);
return dnums;
}
Window MakeWindow(int convolution_rank) {
Window window;
for (int i = 0; i < convolution_rank; ++i) {
WindowDimension* window_dim = window.add_dimensions();
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
return window;
}
template <typename ElementType>
class ConvolutionThunkBuilder {
public:
auto Build(int convolution_rank,
ConvolutionDimensions dims = ConvolutionDimensions()) {
auto input_dims = MakeInputDims(convolution_rank, dims);
auto kernel_dims = MakeKernelDims(convolution_rank, dims);
auto output_dims = MakeOutputDims(convolution_rank, dims);
input_ = MakeDataVector<ElementType>(input_dims);
kernel_ = MakeDataVector<ElementType>(kernel_dims);
output_ = MakeDataVector<ElementType>(output_dims);
size_t input_size_in_bytes = input_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(input_.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(kernel_.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(output_.data(), output_size_in_bytes));
allocations_ = std::make_unique<BufferAllocations>(buffers_);
input_alloc_ =
std::make_unique<BufferAllocation>(0, input_size_in_bytes, 0);
kernel_alloc_ =
std::make_unique<BufferAllocation>(1, kernel_size_in_bytes, 0);
output_alloc_ =
std::make_unique<BufferAllocation>(2, output_size_in_bytes, 0);
BufferAllocation::Slice input_slice(input_alloc_.get(), 0,
input_size_in_bytes);
BufferAllocation::Slice kernel_slice(kernel_alloc_.get(), 0,
kernel_size_in_bytes);
BufferAllocation::Slice output_slice(output_alloc_.get(), 0,
output_size_in_bytes);
auto primitive_type = primitive_util::NativeToPrimitiveType<ElementType>();
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_dims);
Shape kernel_shape = ShapeUtil::MakeShape(primitive_type, kernel_dims);
Shape output_shape = ShapeUtil::MakeShape(primitive_type, output_dims);
auto options = MakeConvolutionOptions();
auto dnums = MakeConvolutionDimensionNumbers(convolution_rank);
auto window = MakeWindow(convolution_rank);
return ConvolutionThunk::Create(
{"convolution"}, options, std::move(input_slice), input_shape,
std::move(kernel_slice), kernel_shape, std::move(output_slice),
output_shape, dnums, window,
1);
}
auto GetExecutionParams() {
return Thunk::ExecuteParams{nullptr, allocations_.get()};
}
private:
std::vector<ElementType> input_;
std::vector<ElementType> kernel_;
std::vector<ElementType> output_;
std::vector<MaybeOwningDeviceMemory> buffers_;
std::unique_ptr<BufferAllocations> allocations_;
std::unique_ptr<BufferAllocation> input_alloc_;
std::unique_ptr<BufferAllocation> kernel_alloc_;
std::unique_ptr<BufferAllocation> output_alloc_;
};
template <typename ElementType>
void SuccessfulConvolution(int convolution_rank) {
ConvolutionThunkBuilder<ElementType> builder;
TF_ASSERT_OK_AND_ASSIGN(auto thunk, builder.Build(convolution_rank))
Thunk::ExecuteParams params = builder.GetExecutionParams();
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution1D) {
SuccessfulConvolution<TypeParam>(1);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution2D) {
SuccessfulConvolution<TypeParam>(2);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution3D) {
SuccessfulConvolution<TypeParam>(3);
}
TEST(ConvolutionThunkTest, CreationErrorOnUnsupportedType) {
ConvolutionThunkBuilder<int> builder;
auto status_or_thunk = builder.Build(2);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Unsupported element type (S32)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnIncorrectConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk = builder.Build(4);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (4)"));
}
}
} |
31 | #include "phonenumbers/logger.h"
#include <cstddef>
namespace i18n {
namespace phonenumbers {
// Process-wide logger instance; installed via Logger::set_logger_impl().
// nullptr instead of NULL — this is C++ code, not C.
Logger* Logger::impl_ = nullptr;
}
} | #include <string>
#include <gtest/gtest.h>
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/logger.h"
namespace i18n {
namespace phonenumbers {
// Test logger that accumulates every written message into a string so tests
// can assert on exactly what was logged.
class StringLogger : public Logger {
 public:
  virtual ~StringLogger() {}
  // All messages written so far, concatenated in write order.
  const string& message() const {
    return msg_;
  }
  virtual void WriteMessage(const string& msg) {
    msg_ += msg;
  }
 private:
  string msg_;
};
// Installs a StringLogger (at LOG_INFO) as the global logger for the
// duration of each test, restoring the previous logger in TearDown.
class LoggerTest : public ::testing::Test {
 protected:
  virtual void SetUp() {
    test_logger_.reset(new StringLogger());
    test_logger_->set_level(LOG_INFO);
    // Remember the current global logger so TearDown can restore it.
    old_logger_ = Logger::mutable_logger_impl();
    Logger::set_logger_impl(test_logger_.get());
  }
  virtual void TearDown() {
    Logger::set_logger_impl(old_logger_);
  }
  scoped_ptr<StringLogger> test_logger_;
  Logger* old_logger_;
};
TEST_F(LoggerTest, LoggerIgnoresHigherVerbosity) {
LOG(LOG_DEBUG) << "Hello";
EXPECT_EQ("", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsNewline) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsEqualVerbosity) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsMoreSeriousMessages) {
LOG(LOG_WARNING) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerConcatenatesMessages) {
LOG(LOG_INFO) << "Hello";
ASSERT_EQ("Hello\n", test_logger_->message());
LOG(LOG_INFO) << " World";
EXPECT_EQ("Hello\n World\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerHandlesDifferentTypes) {
LOG(LOG_INFO) << "Hello " << 42;
EXPECT_EQ("Hello 42\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerIgnoresVerboseLogs) {
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("", test_logger_->message());
test_logger_->set_level(LOG_DEBUG);
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerShowsDebugLogsAtDebugLevel) {
test_logger_->set_level(LOG_DEBUG);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsDebugLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsErrorLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(ERROR) << "Error hello";
EXPECT_EQ("Error hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsLogsAccordingToVerbosity) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
VLOG(verbose_log_level + 1) << "Hello 3";
EXPECT_EQ("", test_logger_->message());
VLOG(verbose_log_level - 1) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
VLOG(verbose_log_level) << "Hello 2";
EXPECT_EQ("Hello\nHello 2\n", test_logger_->message());
}
}
} |
32 | #ifndef TENSORFLOW_CC_TOOLS_FREEZE_SAVED_MODEL_H_
#define TENSORFLOW_CC_TOOLS_FREEZE_SAVED_MODEL_H_
#include <unordered_set>
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
// Writes a frozen version of the bundle's graph to 'frozen_graph_def': only
// nodes reachable from the SignatureDef outputs are kept, and variables
// reachable from those outputs are replaced by Const nodes holding their
// current values from the bundle's session. 'inputs' and 'outputs' receive
// the tensor names referenced by all SignatureDefs in the bundle.
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
                        GraphDef* frozen_graph_def,
                        std::unordered_set<string>* inputs,
                        std::unordered_set<string>* outputs);
}
#endif
#include "tensorflow/cc/tools/freeze_saved_model.h"
#include <iostream>
#include <queue>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace {
void GetTensorNamesFromTensorInfo(const TensorInfo& tensor_info,
std::unordered_set<string>* tensor_names) {
if (tensor_info.has_coo_sparse()) {
const TensorInfo_CooSparse& coo_sparse = tensor_info.coo_sparse();
tensor_names->insert(coo_sparse.values_tensor_name());
tensor_names->insert(coo_sparse.indices_tensor_name());
tensor_names->insert(coo_sparse.dense_shape_tensor_name());
} else if (tensor_info.has_composite_tensor()) {
for (const auto& component : tensor_info.composite_tensor().components()) {
tensor_names->insert(component.name());
}
} else {
tensor_names->insert(tensor_info.name());
}
}
void GetSignatureDefsInputsAndOutputs(
const SavedModelBundle& saved_model_bundle,
std::unordered_set<string>* inputs, std::unordered_set<string>* outputs) {
for (auto& sigdef_elem : saved_model_bundle.meta_graph_def.signature_def()) {
const SignatureDef& signature_def = sigdef_elem.second;
for (auto& input_elem : signature_def.inputs()) {
GetTensorNamesFromTensorInfo(input_elem.second, inputs);
}
for (auto& output_elem : signature_def.outputs()) {
GetTensorNamesFromTensorInfo(output_elem.second, outputs);
}
}
}
// Builds an index from node name to mutable NodeDef pointer for every node in
// 'graph_def'. The pointers are only valid while 'graph_def' is alive and its
// node list is not resized.
void GetNodeNameToNodeDefMap(
    GraphDef* graph_def,
    std::unordered_map<string, NodeDef*>* name_to_node_map) {
  for (NodeDef& node : *graph_def->mutable_node()) {
    (*name_to_node_map)[node.name()] = &node;
  }
}
// Returns the node-name portion of 'tensor_name': strips a leading '^'
// (control-dependency marker) and any ":<output_index>" suffix.
// Fix: the previous implementation indexed element 0 of the result of
// str_util::Split, which is undefined behavior for an empty input; it also
// allocated a vector per call. This version is allocation-light and returns
// "" for "".
const string GetNodeNameFromTensorName(string tensor_name) {
  if (!tensor_name.empty() && tensor_name.front() == '^') {
    tensor_name.erase(0, 1);
  }
  // find(':') == npos means no output suffix; substr then returns the whole
  // remaining string, matching the old Split-based behavior.
  return tensor_name.substr(0, tensor_name.find(':'));
}
// Breadth-first traversal from the nodes named by the output tensors in
// 'outputs'; fills 'reachable_node_names' with every node visited and
// 'variable_node_names' with the subset whose op is a variable type.
void GetReachableNodesAndVariables(
    GraphDef* graph_def, const std::unordered_set<string>& outputs,
    const std::unordered_map<string, NodeDef*>& name_to_node_map,
    std::unordered_set<string>* reachable_node_names,
    std::unordered_set<string>* variable_node_names) {
  // Ops that define a variable (ref-style or resource-style). Leaked on
  // purpose: function-local static avoids a destructor at exit.
  static const std::unordered_set<string>* kVariableTypes =
      new std::unordered_set<string>({"Variable", "VariableV2", "VarHandleOp"});
  std::queue<string> nodes_to_visit;
  for (const string& output_tensor_name : outputs) {
    nodes_to_visit.push(GetNodeNameFromTensorName(output_tensor_name));
  }
  while (!nodes_to_visit.empty()) {
    const string node_name = nodes_to_visit.front();
    nodes_to_visit.pop();
    if (reachable_node_names->find(node_name) != reachable_node_names->end()) {
      // Already visited.
      continue;
    }
    reachable_node_names->insert(node_name);
    // NOTE(review): .at() throws if an output/input names a node that is not
    // in the graph — presumably callers only pass valid graphs; confirm.
    NodeDef* node = name_to_node_map.at(node_name);
    if (kVariableTypes->find(node->op()) != kVariableTypes->end()) {
      variable_node_names->insert(node->name());
    }
    for (const string& input_tensor_name : node->input()) {
      nodes_to_visit.push(GetNodeNameFromTensorName(input_tensor_name));
    }
  }
}
// Reads the current value of every variable in 'variable_names_set' from
// 'session' and stores it in 'variable_name_to_value_map', keyed by node
// name. Returns an error if the session run fails.
// Fix: the set parameter was previously passed by value, copying the whole
// set on every call (clang-tidy performance-unnecessary-value-param); it is
// now passed by const reference, which is source-compatible for all callers.
Status GetVariableNameToTensorMap(
    Session* session,
    const std::unordered_map<string, NodeDef*>& name_to_node_map,
    const std::unordered_set<string>& variable_names_set,
    std::unordered_map<string, Tensor>* variable_name_to_value_map) {
  if (variable_names_set.empty()) {
    return OkStatus();
  }
  std::vector<string> variable_names;
  variable_names.reserve(variable_names_set.size());
  std::vector<string> tensor_names;
  tensor_names.reserve(variable_names_set.size());
  for (const string& node_name : variable_names_set) {
    variable_names.push_back(node_name);
    NodeDef* node_def = name_to_node_map.at(node_name);
    if (node_def->op() == "VarHandleOp") {
      // A resource variable's value is exposed through its read op.
      tensor_names.push_back(node_name + "/Read/ReadVariableOp:0");
    } else {
      // Ref variables (Variable/VariableV2) expose the value as output 0.
      tensor_names.push_back(node_name + ":0");
    }
  }
  std::vector<Tensor> outputs;
  TF_RETURN_IF_ERROR(
      session->Run( {}, tensor_names, {}, &outputs));
  // 'outputs' is ordered like 'tensor_names', which parallels
  // 'variable_names'.
  for (size_t i = 0; i < variable_names.size(); i++) {
    (*variable_name_to_value_map)[variable_names[i]] = outputs[i];
  }
  return OkStatus();
}
// Rewrites 'variable_node' as a Const node with the same name and dtype,
// whose value is 'variable_value'. Throws (via .at) if the variable node has
// no "dtype" attr.
void ConvertVariableToConstant(const NodeDef& variable_node,
                               const Tensor& variable_value,
                               NodeDef* const_node) {
  const_node->set_name(variable_node.name());
  const_node->set_op("Const");
  (*const_node->mutable_attr())["dtype"] = variable_node.attr().at("dtype");
  variable_value.AsProtoTensorContent(
      (*const_node->mutable_attr())["value"].mutable_tensor());
}
// Rewrites a ReadVariableOp 'node' as an Identity node of the same name that
// forwards the node's first input; the Identity's "T" attr is taken from the
// ReadVariableOp's "dtype" attr.
void ConvertReadVariableOpToIdentity(const NodeDef& node,
                                     NodeDef* identity_node) {
  identity_node->set_name(node.name());
  identity_node->set_op("Identity");
  (*identity_node->mutable_attr())["T"] = node.attr().at("dtype");
  identity_node->add_input(node.input(0));
}
// Follows the chain of first inputs upward from 'node_name' through Identity
// nodes and returns the name of the VarHandleOp that terminates the chain,
// or NotFound if the chain ends at any other op.
StatusOr<string> GetVarHandleName(
    const std::unordered_map<string, NodeDef*>& name_to_node_map,
    string node_name) {
  const NodeDef* node = name_to_node_map.at(node_name);
  while (node->input_size() > 0) {
    // NOTE(review): input(0) is looked up verbatim; an input of the form
    // "name:0" or "^name" would not be found in the map — presumably
    // resource inputs are always plain node names here. Confirm.
    auto parent = name_to_node_map.find(node->input(0));
    if (parent == name_to_node_map.end()) break;
    node = parent->second;
    // Stop walking once a non-Identity ancestor is reached; the check below
    // decides whether that ancestor is a variable handle.
    if (node->op() != "Identity") {
      VLOG(2) << "Stopping at non-identity node " << node->op();
      break;
    }
  }
  if (node->op() == "VarHandleOp") {
    return node->name();
  }
  return absl::NotFoundError("No VarHandleOp ancestor found");
}
// Returns the name of the VarHandleOp ancestor of 'node_name', but only when
// that handle is one of the variables being frozen; otherwise NotFound.
StatusOr<string> GetHandleNameIfNeedsToFreeze(
    const std::unordered_map<string, NodeDef*>& name_to_node_map,
    string node_name, const std::unordered_set<string>& variable_node_names) {
  StatusOr<string> var_handle_name =
      GetVarHandleName(name_to_node_map, node_name);
  // Guard clause: either no handle ancestor, or the handle is not frozen.
  if (!var_handle_name.ok() ||
      variable_node_names.count(*var_handle_name) == 0) {
    return absl::NotFoundError("No VarHandleOp ancestor found");
  }
  return var_handle_name;
}
// Copies the subgraph of the bundle's graph reachable from 'outputs' into
// 'frozen_graph_def', converting variables to Const nodes holding their
// session values and rewriting nodes that depend on frozen resource handles.
Status FreezeGraphDef(const SavedModelBundle& saved_model_bundle,
                      const std::unordered_set<string>& outputs,
                      GraphDef* frozen_graph_def) {
  GraphDef graph_def = saved_model_bundle.meta_graph_def.graph_def();
  // Versions and the function library are copied unconditionally.
  *frozen_graph_def->mutable_versions() = graph_def.versions();
  *frozen_graph_def->mutable_library() = graph_def.library();
  if (graph_def.node_size() == 0) {
    return OkStatus();
  }
  std::unordered_map<string, NodeDef*> name_to_node_map;
  GetNodeNameToNodeDefMap(&graph_def, &name_to_node_map);
  std::unordered_set<string> reachable_node_names;
  std::unordered_set<string> variable_node_names;
  GetReachableNodesAndVariables(&graph_def, outputs, name_to_node_map,
                                &reachable_node_names, &variable_node_names);
  // Capture the current value of every reachable variable from the session.
  std::unordered_map<string, Tensor> variable_to_value_map;
  TF_RETURN_IF_ERROR(GetVariableNameToTensorMap(
      saved_model_bundle.session.get(), name_to_node_map, variable_node_names,
      &variable_to_value_map));
  for (const NodeDef& node : graph_def.node()) {
    if (reachable_node_names.find(node.name()) == reachable_node_names.end()) {
      // Drop nodes not needed by the requested outputs.
      continue;
    }
    if (variable_node_names.find(node.name()) != variable_node_names.end()) {
      // Variable nodes become Const nodes holding the captured value.
      ConvertVariableToConstant(node, variable_to_value_map[node.name()],
                                frozen_graph_def->add_node());
      continue;
    } else if (node.op() == "ReadVariableOp" &&
               GetHandleNameIfNeedsToFreeze(name_to_node_map, node.name(),
                                            variable_node_names)
                   .ok()) {
      // A read of a frozen resource variable becomes an Identity of the
      // newly created Const.
      ConvertReadVariableOpToIdentity(node, frozen_graph_def->add_node());
      continue;
    } else if (node.op() == "Identity") {
      StatusOr<string> handle_name = GetHandleNameIfNeedsToFreeze(
          name_to_node_map, node.name(), variable_node_names);
      if (handle_name.ok()) {
        // Identity of a frozen variable handle: its "T" attr is retyped to
        // the handle's "dtype" (it presumably carried a resource type
        // before freezing).
        NodeDef* new_node = frozen_graph_def->add_node();
        *new_node = node;
        (*new_node->mutable_attr())["T"] =
            name_to_node_map.at(*handle_name)->attr().at("dtype");
        continue;
      }
    }
    // All other reachable nodes are copied verbatim.
    *frozen_graph_def->add_node() = node;
  }
  return OkStatus();
}
}
// See freeze_saved_model.h: collects input/output tensor names from all
// SignatureDefs, then freezes the graph with respect to those outputs.
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
                        GraphDef* frozen_graph_def,
                        std::unordered_set<string>* inputs,
                        std::unordered_set<string>* outputs) {
  GetSignatureDefsInputsAndOutputs(saved_model_bundle, inputs, outputs);
  TF_RETURN_IF_ERROR(
      FreezeGraphDef(saved_model_bundle, *outputs, frozen_graph_def));
  return OkStatus();
}
}
#include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
// Fixture with helpers to build SavedModelBundles in-process and verify that
// FreezeSavedModel produces a frozen graph equivalent to the original.
class FreezeTest : public ::testing::Test {
 protected:
  // Asserts that two GraphDefs serialize identically.
  void GraphDefEqual(const GraphDef& actual, const GraphDef& expected) {
    EXPECT_EQ(actual.ShortDebugString(), expected.ShortDebugString());
  }
  // Builds a SignatureDef whose input/output TensorInfos carry the given
  // tensor names.
  SignatureDef BuildSignatureDef(const std::unordered_set<string>& inputs,
                                 const std::unordered_set<string>& outputs) {
    SignatureDef signature_def;
    for (const string& input : inputs) {
      (*signature_def.mutable_inputs())[input].set_name(input);
    }
    for (const string& output : outputs) {
      (*signature_def.mutable_outputs())[output].set_name(output);
    }
    return signature_def;
  }
  // Registers 'signature_def' under 'key' in the bundle's MetaGraphDef.
  void AddSignatureDefToSavedModelBundle(const SignatureDef& signature_def,
                                         const string& key,
                                         SavedModelBundle* saved_model_bundle) {
    MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
    (*meta_graph_def->mutable_signature_def())[key] = signature_def;
  }
  // Creates the bundle's session from 'graph_def' and, if 'init_node' is
  // non-empty, runs it (e.g. a variable initializer).
  Status InitializeSavedModelBundleSession(
      const GraphDef& graph_def, const string& init_node,
      SavedModelBundle* saved_model_bundle) {
    SessionOptions session_options;
    saved_model_bundle->session.reset(NewSession(session_options));
    TF_RETURN_IF_ERROR(saved_model_bundle->session->Create(graph_def));
    if (!init_node.empty()) {
      std::vector<Tensor> outputs;
      return saved_model_bundle->session->Run(
          {}, {}, {init_node}, &outputs);
    }
    return OkStatus();
  }
  // Stores 'graph_def' in the bundle's MetaGraphDef and initializes the
  // session.
  Status AddGraphDefToSavedModelBundle(const GraphDef& graph_def,
                                       const string& init_node,
                                       SavedModelBundle* saved_model_bundle) {
    MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
    *meta_graph_def->mutable_graph_def() = graph_def;
    return InitializeSavedModelBundleSession(graph_def, init_node,
                                             saved_model_bundle);
  }
  // As above, but also registers a SignatureDef exposing 'outputs'.
  Status AddGraphDefWithOutputsToSavedModelBundle(
      const GraphDef& graph_def, const std::unordered_set<string>& outputs,
      const string& init_node, SavedModelBundle* saved_model_bundle) {
    SignatureDef signature_def =
        BuildSignatureDef(std::unordered_set<string>(), outputs);
    AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
                                      saved_model_bundle);
    return AddGraphDefToSavedModelBundle(graph_def, init_node,
                                         saved_model_bundle);
  }
  // Runs 'tensor_name' in the unfrozen session and in a fresh session built
  // from 'frozen_graph_def', and expects bit-identical float results.
  void RunAndCompareFrozenAndUnfrozenGraphs(Session* unfrozen_session,
                                            const GraphDef& frozen_graph_def,
                                            const string& tensor_name) {
    std::vector<Tensor> unfrozen_outputs;
    TF_ASSERT_OK(unfrozen_session->Run( {}, {tensor_name},
                                       {}, &unfrozen_outputs));
    SessionOptions session_options;
    std::unique_ptr<Session> frozen_session(NewSession(session_options));
    TF_ASSERT_OK(frozen_session->Create(frozen_graph_def));
    std::vector<Tensor> frozen_outputs;
    TF_ASSERT_OK(frozen_session->Run( {}, {tensor_name},
                                     {}, &frozen_outputs));
    test::ExpectTensorEqual<float>(unfrozen_outputs[0], frozen_outputs[0]);
  }
  // Freezes a graph whose variable does not feed the requested output; the
  // variable must be pruned from the frozen graph.
  void TestFreezeGraphWithoutDependentVariables(bool use_resource) {
    SavedModelBundle saved_model_bundle;
    GraphDef graph_def;
    Scope scope = Scope::NewRootScope();
    Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
    Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
    Output c = ops::Mul(scope.WithOpName("c"), a, b);
    if (use_resource) {
      Output var =
          ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
      Output read_var = ops::ReadVariableOp(
          scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
      auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
    } else {
      Output var =
          ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
      Output assign = ops::Assign(scope.WithOpName("assign"), var, a);
    }
    TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
    TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
        graph_def, {"c:0"}, "assign", &saved_model_bundle));
    GraphDef frozen_graph_def;
    std::unordered_set<string> inputs;
    std::unordered_set<string> outputs;
    TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
                                  &inputs, &outputs));
    // Only a, b and c should survive freezing.
    GraphDef expected_graph_def;
    Scope expected_scope = Scope::NewRootScope();
    Output expected_a = ops::Const(expected_scope.WithOpName("a"), 10.0f, {});
    Output expected_b = ops::Const(expected_scope.WithOpName("b"), 10.0f, {});
    Output expected_c =
        ops::Mul(expected_scope.WithOpName("c"), expected_a, expected_b);
    TF_ASSERT_OK(expected_scope.ToGraphDef(&expected_graph_def));
    GraphDefEqual(frozen_graph_def, expected_graph_def);
    RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
                                         frozen_graph_def, "c:0");
  }
  // Freezes a graph whose output depends on a variable; the frozen graph must
  // contain no variable ops and still produce the same value.
  void TestFreezeGraphWithDependentVariables(bool use_resource,
                                             bool use_identity = false) {
    SavedModelBundle saved_model_bundle;
    GraphDef graph_def;
    Scope scope = Scope::NewRootScope();
    Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
    Output read_var;
    if (use_resource) {
      Output var =
          ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
      if (use_identity) {
        Output identity = ops::Identity(scope.WithOpName("identity"), var);
        read_var =
            ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
                                identity, DataType::DT_FLOAT);
      } else {
        read_var =
            ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
                                var, DataType::DT_FLOAT);
      }
      auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
    } else {
      // Assign to the outer 'read_var' (a previous version declared a new
      // local 'Output read_var' here, shadowing the outer variable consumed
      // by 'c' below and leaving it default-constructed).
      read_var =
          ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
      Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
    }
    Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
    TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
    TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
        graph_def, {"c:0"}, "assign", &saved_model_bundle));
    GraphDef frozen_graph_def;
    std::unordered_set<string> inputs;
    std::unordered_set<string> outputs;
    TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
                                  &inputs, &outputs));
    size_t expected_nodes = use_resource ? (use_identity ? 5 : 4) : 3;
    EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
    // No variable-related ops may remain after freezing.
    for (const NodeDef& node : frozen_graph_def.node()) {
      EXPECT_NE(node.op(), "Variable") << node.name();
      EXPECT_NE(node.op(), "VariableV2") << node.name();
      EXPECT_NE(node.op(), "VarHandleOp") << node.name();
      EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
    }
    RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
                                         frozen_graph_def, "c:0");
  }
  // Mixes one variable that feeds the output with one that does not; only the
  // former is frozen, the latter is pruned.
  void TestFreezeGraphWithAndWithoutDependentVariables(bool use_resource) {
    SavedModelBundle saved_model_bundle;
    GraphDef graph_def;
    Scope scope = Scope::NewRootScope();
    Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
    Output read_var;
    if (use_resource) {
      Output var =
          ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
      read_var = ops::ReadVariableOp(
          scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
      auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
      Output var_1 =
          ops::VarHandleOp(scope.WithOpName("var_1"), DataType::DT_FLOAT, {});
      // NOTE(review): this reads 'var', not 'var_1'; harmless here since
      // var_1 is unreachable from "c:0", but confirm it is intentional.
      Output read_var_1 =
          ops::ReadVariableOp(scope.WithOpName("var_1/Read/ReadVariableOp"),
                              var, DataType::DT_FLOAT);
      auto assign_1 =
          ops::AssignVariableOp(scope.WithOpName("assign_1"), var_1, a);
    } else {
      read_var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
      Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
      Output var_1 =
          ops::Variable(scope.WithOpName("var_1"), {}, DataType::DT_FLOAT);
      Output assign_1 = ops::Assign(scope.WithOpName("assign_1"), var_1, a);
    }
    Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
    TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
    TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
        graph_def, {"c:0"}, "assign", &saved_model_bundle));
    GraphDef frozen_graph_def;
    std::unordered_set<string> inputs;
    std::unordered_set<string> outputs;
    TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
                                  &inputs, &outputs));
    size_t expected_nodes = use_resource ? 4 : 3;
    EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
    // No variable-related ops may remain after freezing.
    for (const NodeDef& node : frozen_graph_def.node()) {
      EXPECT_NE(node.op(), "Variable") << node.name();
      EXPECT_NE(node.op(), "VariableV2") << node.name();
      EXPECT_NE(node.op(), "VarHandleOp") << node.name();
      EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
    }
    RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
                                         frozen_graph_def, "c:0");
  }
};
// Inputs/outputs are collected from a single SignatureDef.
TEST_F(FreezeTest, InputsAndOutputsSingleSignatureDef) {
  SavedModelBundle saved_model_bundle;
  std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
  std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
  SignatureDef signature_def =
      BuildSignatureDef(expected_inputs, expected_outputs);
  AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
                                    &saved_model_bundle);
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  EXPECT_EQ(expected_inputs, inputs);
  EXPECT_EQ(expected_outputs, outputs);
}
// Inputs/outputs are merged across multiple SignatureDefs.
TEST_F(FreezeTest, InputsAndOutputsMultipleSignatureDefs) {
  SavedModelBundle saved_model_bundle;
  SignatureDef signature_def_0 = BuildSignatureDef({"input0:0"}, {"output0:0"});
  SignatureDef signature_def_1 = BuildSignatureDef({"input1:0"}, {"output1:0"});
  AddSignatureDefToSavedModelBundle(signature_def_0, "signature_def_0",
                                    &saved_model_bundle);
  AddSignatureDefToSavedModelBundle(signature_def_1, "signature_def_1",
                                    &saved_model_bundle);
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
  std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
  EXPECT_EQ(expected_inputs, inputs);
  EXPECT_EQ(expected_outputs, outputs);
}
// Graph versions and the function library are preserved by freezing.
TEST_F(FreezeTest, GraphDefVersionsAndLibrary) {
  SavedModelBundle saved_model_bundle;
  GraphDef graph_def;
  graph_def.mutable_versions()->set_producer(1234);
  graph_def.mutable_versions()->set_min_consumer(1234);
  *graph_def.mutable_library()->add_function() = test::function::NonZero();
  TF_ASSERT_OK(
      AddGraphDefToSavedModelBundle(graph_def, "", &saved_model_bundle));
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  GraphDefEqual(frozen_graph_def, graph_def);
}
// A variable-free graph is frozen unchanged.
TEST_F(FreezeTest, GraphDefWithNoVariables) {
  SavedModelBundle saved_model_bundle;
  GraphDef graph_def;
  Scope scope = Scope::NewRootScope();
  Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
  Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
  Output c = ops::Mul(scope.WithOpName("c"), a, b);
  TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
  TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
                                                        &saved_model_bundle));
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  GraphDefEqual(frozen_graph_def, graph_def);
}
// Output tensor names with a non-zero index (Split's ":1") resolve to the
// producing node during reachability analysis.
TEST_F(FreezeTest, GraphDefWithMultiOutputOperation) {
  SavedModelBundle saved_model_bundle;
  GraphDef graph_def;
  Scope scope = Scope::NewRootScope();
  Output a = ops::Const(scope.WithOpName("a"), {10.0f, 10.0f}, {2});
  Output axis = ops::Const(scope.WithOpName("axis"), 0, {});
  OutputList split = ops::Split(scope.WithOpName("split"), axis, a, 2).output;
  Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
  Output c = ops::Mul(scope.WithOpName("c"), split[1], b);
  TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
  TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
                                                        &saved_model_bundle));
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  GraphDefEqual(frozen_graph_def, graph_def);
}
// Control-dependency inputs ("^source") are followed during reachability
// analysis, so the control source survives freezing.
TEST_F(FreezeTest, GraphDefWithControlDependency) {
  SavedModelBundle saved_model_bundle;
  GraphDef graph_def;
  Scope scope = Scope::NewRootScope();
  Output source = ops::Const(scope.WithOpName("source"), 10.0f, {});
  Output a = ops::Const(scope.WithOpName("a").WithControlDependencies(source),
                        {10.0f, 10.0f}, {2});
  Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
  Output c = ops::Mul(scope.WithOpName("c"), a, b);
  TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
  TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
                                                        &saved_model_bundle));
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  GraphDefEqual(frozen_graph_def, graph_def);
}
// Ref variable not feeding the output is pruned.
TEST_F(FreezeTest, GraphDefWithoutDependentVariables) {
  TestFreezeGraphWithoutDependentVariables(false);
}
// Resource variable not feeding the output is pruned.
TEST_F(FreezeTest, GraphDefWithoutDependentResourceVariables) {
  TestFreezeGraphWithoutDependentVariables(true);
}
// Ref variable feeding the output is converted to a constant.
TEST_F(FreezeTest, GraphDefWithDependentVariables) {
  TestFreezeGraphWithDependentVariables(false);
}
// Resource variable feeding the output is converted to a constant.
TEST_F(FreezeTest, GraphDefWithDependentResourceVariables) {
  TestFreezeGraphWithDependentVariables(true);
}
// Resource variable read through an Identity of the handle is also frozen.
TEST_F(FreezeTest, GraphDefWithDependentResourceVariablesAndIdentity) {
  TestFreezeGraphWithDependentVariables(true, true);
}
// Mixed case, ref variables: dependent one frozen, other pruned.
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentVariables) {
  TestFreezeGraphWithAndWithoutDependentVariables(false);
}
// Mixed case, resource variables: dependent one frozen, other pruned.
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentResourceVariables) {
  TestFreezeGraphWithAndWithoutDependentVariables(true);
}
// Composite-tensor TensorInfos contribute each component tensor name.
TEST_F(FreezeTest, InputsAndOutputsCompositeTensorSignatureDef) {
  SavedModelBundle saved_model_bundle;
  SignatureDef signature_def;
  TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
  in.mutable_composite_tensor()->add_components()->set_name("input1:0");
  in.mutable_composite_tensor()->add_components()->set_name("input2:0");
  TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
  out.mutable_composite_tensor()->add_components()->set_name("output2:0");
  out.mutable_composite_tensor()->add_components()->set_name("output1:0");
  AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
                                    &saved_model_bundle);
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  std::unordered_set<string> expected_inputs = {"input1:0", "input2:0"};
  std::unordered_set<string> expected_outputs = {"output1:0", "output2:0"};
  EXPECT_EQ(expected_inputs, inputs);
  EXPECT_EQ(expected_outputs, outputs);
}
// COO-sparse TensorInfos contribute values, indices and dense-shape names.
TEST_F(FreezeTest, InputsAndOutputsSparseCooSignatureDef) {
  SavedModelBundle saved_model_bundle;
  SignatureDef signature_def;
  TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
  in.mutable_coo_sparse()->set_values_tensor_name("input1:0");
  in.mutable_coo_sparse()->set_indices_tensor_name("input2:0");
  in.mutable_coo_sparse()->set_dense_shape_tensor_name("input3:0");
  TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
  out.mutable_coo_sparse()->set_values_tensor_name("output1:0");
  out.mutable_coo_sparse()->set_indices_tensor_name("output2:0");
  out.mutable_coo_sparse()->set_dense_shape_tensor_name("output3:0");
  AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
                                    &saved_model_bundle);
  GraphDef frozen_graph_def;
  std::unordered_set<string> inputs;
  std::unordered_set<string> outputs;
  TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
                                &outputs));
  std::unordered_set<string> expected_inputs = {"input1:0", "input2:0",
                                                "input3:0"};
  std::unordered_set<string> expected_outputs = {"output1:0", "output2:0",
                                                 "output3:0"};
  EXPECT_EQ(expected_inputs, inputs);
  EXPECT_EQ(expected_outputs, outputs);
}
}
}
#ifndef TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_H_
#include <cstddef>
#include <string>
#include <string_view>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace blosc {
// Options controlling Blosc encoding (see Encode in blosc.cc).
struct Options {
  // Compressor name passed to blosc_compress_ctx (e.g. "lz4", "zstd").
  const char* compressor;
  // Compression level passed to blosc_compress_ctx.
  int clevel;
  // Shuffle mode; -1 selects BLOSC_BITSHUFFLE when element_size == 1 and
  // BLOSC_SHUFFLE otherwise.
  int shuffle;
  // Requested block size; 0 lets Blosc choose automatically.
  size_t blocksize;
  // Element size in bytes (Blosc "typesize"), used for shuffling.
  size_t element_size;
};
// Compresses 'input' with the given options; fails with InvalidArgument if
// 'input' exceeds BLOSC_MAX_BUFFERSIZE.
Result<std::string> Encode(std::string_view input, const Options& options);
// Decompresses 'input'; fails with InvalidArgument if it is not valid
// blosc-compressed data.
Result<std::string> Decode(std::string_view input);
}
}
#endif
#include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace blosc {
// Compresses 'input' with blosc_compress_ctx according to 'options'.
Result<std::string> Encode(std::string_view input, const Options& options) {
  // Blosc cannot compress buffers larger than BLOSC_MAX_BUFFERSIZE.
  if (input.size() > BLOSC_MAX_BUFFERSIZE) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Blosc compression input of ", input.size(),
        " bytes exceeds maximum size of ", BLOSC_MAX_BUFFERSIZE));
  }
  // -1 means "auto": bit-shuffle single-byte elements, byte-shuffle others.
  const int shuffle =
      options.shuffle != -1
          ? options.shuffle
          : (options.element_size == 1 ? BLOSC_BITSHUFFLE : BLOSC_SHUFFLE);
  // Worst case, Blosc needs the input size plus a fixed overhead.
  std::string result(input.size() + BLOSC_MAX_OVERHEAD, '\0');
  const int compressed_size = blosc_compress_ctx(
      options.clevel, shuffle, options.element_size, input.size(), input.data(),
      result.data(), result.size(), options.compressor, options.blocksize,
      1);
  if (compressed_size < 0) {
    return absl::InternalError(
        tensorstore::StrCat("Internal blosc error: ", compressed_size));
  }
  // Trim the buffer to the actual compressed length.
  result.erase(compressed_size);
  return result;
}
// Decompresses a blosc-encoded buffer, validating the header first.
Result<std::string> Decode(std::string_view input) {
  size_t decompressed_size;
  // Validates the header and reports the decompressed byte count.
  if (blosc_cbuffer_validate(input.data(), input.size(),
                             &decompressed_size) != 0) {
    return absl::InvalidArgumentError("Invalid blosc-compressed data");
  }
  std::string result(decompressed_size, '\0');
  if (decompressed_size > 0) {
    const int status = blosc_decompress_ctx(input.data(), result.data(),
                                            result.size(),
                                            1);
    if (status <= 0) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Blosc error: ", status));
    }
  }
  return result;
}
}
}
#include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace blosc = tensorstore::blosc;
// Returns a spread of compressor/clevel/shuffle/blocksize combinations.
// Note: only 4 of Options' 5 fields are initialized, so element_size is
// value-initialized to 0; tests that use these options set it explicitly.
std::vector<blosc::Options> GetTestOptions() {
  return {
      blosc::Options{"lz4", 5, -1, 0},
      blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4", 0, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4hc", 5, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 0},
      blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4", 9, BLOSC_BITSHUFFLE, 0},
      blosc::Options{"zlib", 1, BLOSC_NOSHUFFLE, 0},
      blosc::Options{"zstd", 1, BLOSC_SHUFFLE, 0},
      blosc::Options{"blosclz", 1, BLOSC_BITSHUFFLE, 0},
      blosc::Options{"snappy", 1, BLOSC_NOSHUFFLE, 0},
      blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
      blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 256},
      blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 256},
  };
}
// Returns representative inputs: an empty string, a 100-byte ramp of
// multiples of 7 (mod 256), and a short ASCII sentence.
std::vector<std::string> GetTestArrays() {
  std::vector<std::string> arrays;
  arrays.emplace_back();  // Empty input.
  std::string ramp(100, '\0');
  unsigned char value = 0;
  for (size_t i = 0; i < ramp.size(); ++i) {
    value += 7;
    ramp[i] = static_cast<char>(value);
  }
  arrays.push_back(std::move(ramp));
  arrays.push_back("The quick brown fox jumped over the lazy dog.");
  return arrays;
}
// Round-trips every test array through Encode/Decode for every option set
// and several element sizes.
TEST(BloscTest, EncodeDecode) {
  for (blosc::Options options : GetTestOptions()) {
    for (const auto& array : GetTestArrays()) {
      for (const size_t element_size : {1, 2, 10}) {
        options.element_size = element_size;
        TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                         blosc::Encode(array, options));
        TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, blosc::Decode(encoded));
        EXPECT_EQ(array, decoded);
      }
    }
  }
}
// Each compressor name yields an encoded buffer whose header reports the
// expected compression library.
TEST(BloscTest, CheckComplib) {
  const std::string_view array =
      "The quick brown fox jumped over the lazy dog.";
  const std::vector<std::pair<std::string, std::string>>
      cnames_and_complib_names{{BLOSC_BLOSCLZ_COMPNAME, BLOSC_BLOSCLZ_LIBNAME},
                               {BLOSC_LZ4_COMPNAME, BLOSC_LZ4_LIBNAME},
                               {BLOSC_LZ4HC_COMPNAME, BLOSC_LZ4_LIBNAME},
                               {BLOSC_SNAPPY_COMPNAME, BLOSC_SNAPPY_LIBNAME},
                               {BLOSC_ZLIB_COMPNAME, BLOSC_ZLIB_LIBNAME},
                               {BLOSC_ZSTD_COMPNAME, BLOSC_ZSTD_LIBNAME}};
  for (const auto& pair : cnames_and_complib_names) {
    blosc::Options options{pair.first.c_str(), 5,
                           -1, 0,
                           1};
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                     blosc::Encode(array, options));
    ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
    const char* complib = blosc_cbuffer_complib(encoded.data());
    EXPECT_EQ(pair.second, complib);
  }
}
// The encoded header records the typesize, and the shuffle flags match the
// requested mode (with -1 meaning auto: bit-shuffle iff element_size == 1).
TEST(BloscTest, CheckShuffleAndElementSize) {
  const std::string_view array =
      "The quick brown fox jumped over the lazy dog.";
  for (int shuffle = -1; shuffle <= 2; ++shuffle) {
    for (const size_t element_size : {1, 2, 10}) {
      blosc::Options options{"lz4", 5,
                             shuffle, 0,
                             element_size};
      TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                       blosc::Encode(array, options));
      ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
      size_t typesize;
      int flags;
      blosc_cbuffer_metainfo(encoded.data(), &typesize, &flags);
      EXPECT_EQ(element_size, typesize);
      const bool expected_byte_shuffle =
          shuffle == 1 || (shuffle == -1 && element_size != 1);
      const bool expected_bit_shuffle =
          shuffle == 2 || (shuffle == -1 && element_size == 1);
      EXPECT_EQ(expected_byte_shuffle,
                static_cast<bool>(flags & BLOSC_DOSHUFFLE));
      EXPECT_EQ(expected_bit_shuffle,
                static_cast<bool>(flags & BLOSC_DOBITSHUFFLE));
    }
  }
}
// A requested blocksize is honored and recorded in the encoded header.
TEST(BloscTest, CheckBlocksize) {
  const std::string array(100000, '\0');
  for (size_t blocksize : {256, 512, 1024}) {
    blosc::Options options{"lz4", 0,
                           0, blocksize,
                           1};
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
                                     blosc::Encode(array, options));
    ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
    size_t nbytes, cbytes, bsize;
    blosc_cbuffer_sizes(encoded.data(), &nbytes, &cbytes, &bsize);
    EXPECT_EQ(blocksize, bsize);
  }
}
// Inputs larger than BLOSC_MAX_BUFFERSIZE are rejected with InvalidArgument.
TEST(BloscTest, TooLong) {
  blosc::Options options{"lz4", 5,
                         -1, 0,
                         1};
  EXPECT_THAT(
      blosc::Encode(std::string(BLOSC_MAX_BUFFERSIZE + 1, '\0'), options),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// Corrupting the first header byte makes Decode fail with InvalidArgument.
TEST(BloscTest, DecodeHeaderCorrupted) {
  const std::string_view input =
      "The quick brown fox jumped over the lazy dog.";
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto encoded,
      blosc::Encode(input, blosc::Options{"lz4", 1,
                                          -1, 0,
                                          1}));
  ASSERT_GE(encoded.size(), 1);
  std::string corrupted = std::move(encoded);
  corrupted[0] = 0;
  EXPECT_THAT(blosc::Decode(corrupted),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// A buffer truncated inside the header is rejected with InvalidArgument.
TEST(BloscCompressorTest, DecodeHeaderTruncated) {
  const std::string_view input =
      "The quick brown fox jumped over the lazy dog.";
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto encoded,
      blosc::Encode(input, blosc::Options{"lz4", 1,
                                          -1, 0,
                                          1}));
  ASSERT_GE(encoded.size(), 5);
  EXPECT_THAT(blosc::Decode(std::string_view(encoded).substr(0, 5)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
// A buffer truncated after the header (payload missing) is rejected with
// InvalidArgument.
TEST(BloscCompressorTest, DecodeDataTruncated) {
  const std::string_view input =
      "The quick brown fox jumped over the lazy dog.";
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto encoded,
      blosc::Encode(input, blosc::Options{"lz4", 1,
                                          -1, 0,
                                          1}));
  EXPECT_THAT(blosc::Decode(
                  std::string_view(encoded).substr(0, BLOSC_MIN_HEADER_LENGTH)),
              MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
#ifndef QUICHE_HTTP2_HPACK_DECODER_HPACK_ENTRY_TYPE_DECODER_H_
#define QUICHE_HTTP2_HPACK_DECODER_HPACK_ENTRY_TYPE_DECODER_H_
#include <cstdint>
#include <string>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/hpack/varint/hpack_varint_decoder.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Decodes the type of an HPACK entry (the high bits of its first byte) and
// the varint operand whose prefix shares that byte.
class QUICHE_EXPORT HpackEntryTypeDecoder {
 public:
  // Starts decoding from the first byte of an entry. Requires a non-empty
  // buffer (Start DCHECKs db->HasData()). Returns kDecodeDone when both the
  // type and the varint are complete, otherwise the varint decoder's status.
  DecodeStatus Start(DecodeBuffer* db);

  // Continues decoding a varint that spans buffers; delegates to the
  // embedded varint decoder.
  DecodeStatus Resume(DecodeBuffer* db) { return varint_decoder_.Resume(db); }

  // The entry type determined by Start. Meaningful after a kDecodeDone.
  HpackEntryType entry_type() const { return entry_type_; }

  // The decoded varint operand (index, size, etc.).
  uint64_t varint() const { return varint_decoder_.value(); }

  std::string DebugString() const;

 private:
  HpackVarintDecoder varint_decoder_;
  // Default value; overwritten by Start() before it returns or defers to
  // the varint decoder.
  HpackEntryType entry_type_ = HpackEntryType::kIndexedHeader;
};
QUICHE_EXPORT std::ostream& operator<<(std::ostream& out,
const HpackEntryTypeDecoder& v);
}
#endif
#include "quiche/http2/hpack/decoder/hpack_entry_type_decoder.h"
#include <ios>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::string HpackEntryTypeDecoder::DebugString() const {
  // Renders both the embedded varint decoder state and the entry type.
  std::string result = "HpackEntryTypeDecoder(varint_decoder=";
  absl::StrAppend(&result, varint_decoder_.DebugString(),
                  ", entry_type=", entry_type_, ")");
  return result;
}
std::ostream& operator<<(std::ostream& out, const HpackEntryTypeDecoder& v) {
  // Stream form simply reuses the debug representation.
  out << v.DebugString();
  return out;
}
DecodeStatus HpackEntryTypeDecoder::Start(DecodeBuffer* db) {
  QUICHE_DCHECK(db != nullptr);
  QUICHE_DCHECK(db->HasData());
  // The high-order bits of the first byte select the entry type; the
  // remaining low-order bits are the prefix of the varint operand. When all
  // prefix bits are set, the varint continues in subsequent bytes and we
  // hand off to the varint decoder's extended path.
  const uint8_t byte = db->DecodeUInt8();
  if (byte <= 0b00001111) {
    // 0000xxxx: literal header field without indexing (4-bit prefix).
    entry_type_ = HpackEntryType::kUnindexedLiteralHeader;
    if (byte == 0b00001111) {
      return varint_decoder_.StartExtended(4, db);
    }
    varint_decoder_.set_value(byte);
    return DecodeStatus::kDecodeDone;
  }
  if (byte <= 0b00011111) {
    // 0001xxxx: literal header field never indexed (4-bit prefix).
    entry_type_ = HpackEntryType::kNeverIndexedLiteralHeader;
    if (byte == 0b00011111) {
      return varint_decoder_.StartExtended(4, db);
    }
    varint_decoder_.set_value(byte & 0x0f);
    return DecodeStatus::kDecodeDone;
  }
  if (byte <= 0b00111111) {
    // 001xxxxx: dynamic table size update (5-bit prefix).
    entry_type_ = HpackEntryType::kDynamicTableSizeUpdate;
    if (byte == 0b00111111) {
      return varint_decoder_.StartExtended(5, db);
    }
    varint_decoder_.set_value(byte & 0x01f);
    return DecodeStatus::kDecodeDone;
  }
  if (byte <= 0b01111111) {
    // 01xxxxxx: literal header field with incremental indexing (6-bit
    // prefix).
    entry_type_ = HpackEntryType::kIndexedLiteralHeader;
    if (byte == 0b01111111) {
      return varint_decoder_.StartExtended(6, db);
    }
    varint_decoder_.set_value(byte & 0x03f);
    return DecodeStatus::kDecodeDone;
  }
  // 1xxxxxxx: indexed header field (7-bit prefix). All byte values are
  // covered by the branches above, so no error path is required here.
  entry_type_ = HpackEntryType::kIndexedHeader;
  if (byte == 0b11111111) {
    return varint_decoder_.StartExtended(7, db);
  }
  varint_decoder_.set_value(byte & 0x07f);
  return DecodeStatus::kDecodeDone;
}
} | #include "quiche/http2/hpack/decoder/hpack_entry_type_decoder.h"
#include <vector>
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionSuccess;
namespace http2 {
namespace test {
namespace {
const bool kReturnNonZeroOnFirst = true;
// Test fixture hooking HpackEntryTypeDecoder into the RandomDecoderTest
// framework, which re-runs each decode with randomly-split input buffers.
class HpackEntryTypeDecoderTest : public RandomDecoderTest {
 protected:
  // Begins decoding an entry; the framework guarantees a non-empty buffer,
  // which Start requires.
  DecodeStatus StartDecoding(DecodeBuffer* b) override {
    QUICHE_CHECK_LT(0u, b->Remaining());
    return decoder_.Start(b);
  }

  // Continues a decode that was split across buffer boundaries.
  DecodeStatus ResumeDecoding(DecodeBuffer* b) override {
    return decoder_.Resume(b);
  }

  HpackEntryTypeDecoder decoder_;
};
TEST_F(HpackEntryTypeDecoderTest, DynamicTableSizeUpdate) {
  // Cover one- and multi-byte varint encodings of the size operand.
  for (uint32_t size = 0; size < 1000 * 1000; size += 256) {
    HpackBlockBuilder block_builder;
    block_builder.AppendDynamicTableSizeUpdate(size);
    DecodeBuffer input(block_builder.buffer());
    auto check_decoded = [size, this]() -> AssertionResult {
      HTTP2_VERIFY_EQ(HpackEntryType::kDynamicTableSizeUpdate,
                      decoder_.entry_type());
      HTTP2_VERIFY_EQ(size, decoder_.varint());
      return AssertionSuccess();
    };
    EXPECT_TRUE(DecodeAndValidateSeveralWays(
        &input, kReturnNonZeroOnFirst, ValidateDoneAndEmpty(check_decoded)))
        << "\nentry_type=kDynamicTableSizeUpdate, size=" << size;
    // Validate once more against the decoder's final state.
    EXPECT_TRUE(check_decoded());
  }
}
TEST_F(HpackEntryTypeDecoderTest, HeaderWithIndex) {
  // All four entry types that carry a (possibly zero) index operand.
  std::vector<HpackEntryType> entry_types = {
      HpackEntryType::kIndexedHeader,
      HpackEntryType::kIndexedLiteralHeader,
      HpackEntryType::kUnindexedLiteralHeader,
      HpackEntryType::kNeverIndexedLiteralHeader,
  };
  for (const HpackEntryType entry_type : entry_types) {
    // An indexed header field may not use index 0; the others may.
    const uint32_t first = entry_type == HpackEntryType::kIndexedHeader ? 1 : 0;
    for (uint32_t index = first; index < 1000; ++index) {
      HpackBlockBuilder block_builder;
      block_builder.AppendEntryTypeAndVarint(entry_type, index);
      DecodeBuffer input(block_builder.buffer());
      auto check_decoded = [entry_type, index, this]() -> AssertionResult {
        HTTP2_VERIFY_EQ(entry_type, decoder_.entry_type());
        HTTP2_VERIFY_EQ(index, decoder_.varint());
        return AssertionSuccess();
      };
      EXPECT_TRUE(DecodeAndValidateSeveralWays(
          &input, kReturnNonZeroOnFirst, ValidateDoneAndEmpty(check_decoded)))
          << "\nentry_type=" << entry_type << ", index=" << index;
      EXPECT_TRUE(check_decoded());
    }
  }
}
}
}
} |
35 | #ifndef TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_
#define TENSORFLOW_TSL_PROFILER_LIB_PROFILER_LOCK_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/statusor.h"
namespace tsl {
namespace profiler {
constexpr absl::string_view kProfilerLockContention =
"Another profiling session active.";
// RAII guard for the process-wide "one profiler session at a time" invariant.
// Only one active instance can exist at any moment.
class ProfilerLock {
 public:
  // Returns true if any ProfilerLock in the process is currently active.
  static bool HasActiveSession();

  // Claims the global session slot. Fails with AlreadyExists if another
  // session is active or profiling is disabled via TF_DISABLE_PROFILING.
  static absl::StatusOr<ProfilerLock> Acquire();

  // Default-constructed instances are inactive.
  ProfilerLock() = default;

  // Non-copyable: copying would duplicate ownership of the session slot.
  ProfilerLock(const ProfilerLock&) = delete;
  ProfilerLock& operator=(const ProfilerLock&) = delete;

  // Movable: ownership of the session transfers; the source becomes inactive.
  ProfilerLock(ProfilerLock&& other) noexcept
      : active_(std::exchange(other.active_, false)) {}
  ProfilerLock& operator=(ProfilerLock&& other) noexcept {
    if (this != &other) {
      // BUGFIX: release any session this instance already holds before
      // overwriting active_; otherwise the global active flag would never be
      // cleared and no session could ever be acquired again.
      ReleaseIfActive();
      active_ = std::exchange(other.active_, false);
    }
    return *this;
  }

  ~ProfilerLock() { ReleaseIfActive(); }

  // Releases the global session slot if this instance holds it; no-op
  // otherwise.
  void ReleaseIfActive();

  // Returns true if this instance holds the session.
  bool Active() const { return active_; }

 private:
  // Private: only Acquire() may create an active instance.
  explicit ProfilerLock(bool active) : active_(active) {}

  bool active_ = false;
};
}
}
#endif
#include "tsl/profiler/lib/profiler_lock.h"
#include <atomic>
#include "absl/status/statusor.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace profiler {
namespace {
// Process-wide flag: nonzero while a profiler session is active.
// Brace-initialized directly; ATOMIC_VAR_INIT is deprecated since C++20.
std::atomic<int> g_session_active{0};
// The acquire/release handoff on this flag must not silently fall back to a
// lock-based atomic implementation.
static_assert(ATOMIC_INT_LOCK_FREE == 2, "Assumed atomic<int> was lock free");
}  // namespace
bool ProfilerLock::HasActiveSession() {
return g_session_active.load(std::memory_order_relaxed) != 0;
}
absl::StatusOr<ProfilerLock> ProfilerLock::Acquire() {
  // The kill switch is read from the environment only once per process.
  static const bool tf_profiler_disabled = [] {
    bool disabled = false;
    ReadBoolFromEnvVar("TF_DISABLE_PROFILING", false, &disabled).IgnoreError();
    return disabled;
  }();
  if (TF_PREDICT_FALSE(tf_profiler_disabled)) {
    return errors::AlreadyExists(
        "TensorFlow Profiler is permanently disabled by env var "
        "TF_DISABLE_PROFILING.");
  }
  // Atomically claim the single global session slot.
  const int previous = g_session_active.exchange(1, std::memory_order_acq_rel);
  if (previous != 0) {
    return errors::AlreadyExists(kProfilerLockContention);
  }
  return ProfilerLock(true);
}
void ProfilerLock::ReleaseIfActive() {
  // Inactive instances have nothing to release.
  if (!active_) return;
  active_ = false;
  g_session_active.store(0, std::memory_order_release);
}
}
} | #include "tsl/profiler/lib/profiler_lock.h"
#include <utility>
#include "absl/status/statusor.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
// A default-constructed lock holds no session.
TEST(ProfilerLockTest, DefaultConstructorCreatesInactiveInstance) {
  ProfilerLock profiler_lock;
  EXPECT_FALSE(profiler_lock.Active());
}
// Explicit ReleaseIfActive() deactivates the lock before destruction.
TEST(ProfilerLockTest, AcquireAndReleaseExplicitly) {
  absl::StatusOr<ProfilerLock> profiler_lock = ProfilerLock::Acquire();
  ASSERT_TRUE(profiler_lock.ok());
  EXPECT_TRUE(profiler_lock->Active());
  profiler_lock->ReleaseIfActive();
  EXPECT_FALSE(profiler_lock->Active());
}
// The destructor releases the session (verified implicitly: the next test's
// Acquire succeeds).
TEST(ProfilerLockTest, AcquireAndReleaseOnDestruction) {
  absl::StatusOr<ProfilerLock> profiler_lock = ProfilerLock::Acquire();
  ASSERT_TRUE(profiler_lock.ok());
  EXPECT_TRUE(profiler_lock->Active());
}
// Only one session may be active at a time.
TEST(ProfilerLockTest, ReacquireWithoutReleaseFails) {
  absl::StatusOr<ProfilerLock> profiler_lock_1 = ProfilerLock::Acquire();
  absl::StatusOr<ProfilerLock> profiler_lock_2 = ProfilerLock::Acquire();
  ASSERT_TRUE(profiler_lock_1.ok());
  EXPECT_TRUE(profiler_lock_1->Active());
  EXPECT_FALSE(profiler_lock_2.ok());
}
// Releasing frees the slot for a subsequent Acquire.
TEST(ProfilerLockTest, ReacquireAfterReleaseSucceeds) {
  auto profiler_lock_1 = ProfilerLock::Acquire();
  ASSERT_TRUE(profiler_lock_1.ok());
  ASSERT_TRUE(profiler_lock_1->Active());
  profiler_lock_1->ReleaseIfActive();
  ASSERT_FALSE(profiler_lock_1->Active());
  auto profiler_lock_2 = ProfilerLock::Acquire();
  EXPECT_TRUE(profiler_lock_2.ok());
  EXPECT_TRUE(profiler_lock_2->Active());
}
// Moving transfers session ownership; the moved-from lock becomes inactive.
TEST(ProfilerLockTest, InactiveAfterMove) {
  absl::StatusOr<ProfilerLock> profiler_lock_1 = ProfilerLock::Acquire();
  ASSERT_TRUE(profiler_lock_1.ok());
  ASSERT_TRUE(profiler_lock_1->Active());
  ProfilerLock profiler_lock_2 = std::move(*profiler_lock_1);
  EXPECT_FALSE(profiler_lock_1->Active());
  EXPECT_TRUE(profiler_lock_2.Active());
}
}
}
} |
36 | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_VALUE_EXPORT_UTIL_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_VALUE_EXPORT_UTIL_H_
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/arena.h"
#include "absl/status/status.h"
#include "eval/public/cel_value.h"
namespace google::api::expr::runtime {
absl::Status ExportAsProtoValue(const CelValue& in_value,
google::protobuf::Value* out_value,
google::protobuf::Arena* arena);
// Convenience overload that supplies a temporary arena for the duration of
// the export. The arena is only used for intermediate list/map element
// lookups; *out_value is caller-owned and remains valid after return.
inline absl::Status ExportAsProtoValue(const CelValue& in_value,
                                       google::protobuf::Value* out_value) {
  google::protobuf::Arena arena;
  return ExportAsProtoValue(in_value, out_value, &arena);
}
}
#endif
#include "eval/public/value_export_util.h"
#include <string>
#include "google/protobuf/util/json_util.h"
#include "google/protobuf/util/time_util.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "internal/proto_time_encoding.h"
namespace google::api::expr::runtime {
using google::protobuf::Duration;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::util::TimeUtil;
// Renders a map key as a string so it can serve as a google.protobuf.Struct
// field name. Only int64, uint64 and string keys are supported.
absl::Status KeyAsString(const CelValue& value, std::string* key) {
  switch (value.type()) {
    case CelValue::Type::kString: {
      const auto view = value.StringOrDie().value();
      key->assign(view.data(), view.size());
      break;
    }
    case CelValue::Type::kInt64:
      *key = absl::StrCat(value.Int64OrDie());
      break;
    case CelValue::Type::kUint64:
      *key = absl::StrCat(value.Uint64OrDie());
      break;
    default:
      return absl::InvalidArgumentError("Unsupported map type");
  }
  return absl::OkStatus();
}
// Recursively converts `in_value` into a google.protobuf.Value (the JSON-like
// proto representation):
//   - null                 -> null_value
//   - bool                 -> bool_value
//   - int64/uint64/double  -> number_value (64-bit integers are cast to
//                             double, so values beyond 2^53 lose precision)
//   - string               -> string_value; bytes -> base64 string_value
//   - duration/timestamp   -> string_value in proto3 JSON text form
//   - message              -> round-tripped through JSON into *out_value
//   - list/map             -> list_value / struct_value, recursing per element
// `arena` is used only for intermediate list/map element lookups.
absl::Status ExportAsProtoValue(const CelValue& in_value, Value* out_value,
                                google::protobuf::Arena* arena) {
  if (in_value.IsNull()) {
    out_value->set_null_value(google::protobuf::NULL_VALUE);
    return absl::OkStatus();
  }
  switch (in_value.type()) {
    case CelValue::Type::kBool: {
      out_value->set_bool_value(in_value.BoolOrDie());
      break;
    }
    case CelValue::Type::kInt64: {
      // Lossy for |values| > 2^53: number_value is a double.
      out_value->set_number_value(static_cast<double>(in_value.Int64OrDie()));
      break;
    }
    case CelValue::Type::kUint64: {
      // Lossy for values > 2^53, as above.
      out_value->set_number_value(static_cast<double>(in_value.Uint64OrDie()));
      break;
    }
    case CelValue::Type::kDouble: {
      out_value->set_number_value(in_value.DoubleOrDie());
      break;
    }
    case CelValue::Type::kString: {
      auto value = in_value.StringOrDie().value();
      out_value->set_string_value(value.data(), value.size());
      break;
    }
    case CelValue::Type::kBytes: {
      // JSON has no bytes type; encode as base64 text.
      absl::Base64Escape(in_value.BytesOrDie().value(),
                         out_value->mutable_string_value());
      break;
    }
    case CelValue::Type::kDuration: {
      Duration duration;
      auto status =
          cel::internal::EncodeDuration(in_value.DurationOrDie(), &duration);
      if (!status.ok()) {
        return status;
      }
      // e.g. "2.000000003s" — proto3 JSON text form.
      out_value->set_string_value(TimeUtil::ToString(duration));
      break;
    }
    case CelValue::Type::kTimestamp: {
      Timestamp timestamp;
      auto status =
          cel::internal::EncodeTime(in_value.TimestampOrDie(), &timestamp);
      if (!status.ok()) {
        return status;
      }
      // RFC 3339 text form, e.g. "2001-09-09T01:46:40.000000003Z".
      out_value->set_string_value(TimeUtil::ToString(timestamp));
      break;
    }
    case CelValue::Type::kMessage: {
      // Round-trip the message through its JSON representation so that the
      // result lands in *out_value as a Struct-compatible Value.
      google::protobuf::util::JsonPrintOptions json_options;
      json_options.preserve_proto_field_names = true;
      std::string json;
      auto status = google::protobuf::util::MessageToJsonString(*in_value.MessageOrDie(),
                                                      &json, json_options);
      if (!status.ok()) {
        return absl::InternalError(status.ToString());
      }
      google::protobuf::util::JsonParseOptions json_parse_options;
      status = google::protobuf::util::JsonStringToMessage(json, out_value,
                                                 json_parse_options);
      if (!status.ok()) {
        return absl::InternalError(status.ToString());
      }
      break;
    }
    case CelValue::Type::kList: {
      const CelList* cel_list = in_value.ListOrDie();
      auto out_values = out_value->mutable_list_value();
      for (int i = 0; i < cel_list->size(); i++) {
        // Recurse per element; the arena backs element access.
        auto status = ExportAsProtoValue((*cel_list).Get(arena, i),
                                         out_values->add_values(), arena);
        if (!status.ok()) {
          return status;
        }
      }
      break;
    }
    case CelValue::Type::kMap: {
      const CelMap* cel_map = in_value.MapOrDie();
      CEL_ASSIGN_OR_RETURN(auto keys_list, cel_map->ListKeys(arena));
      auto out_values = out_value->mutable_struct_value()->mutable_fields();
      for (int i = 0; i < keys_list->size(); i++) {
        std::string key;
        CelValue map_key = (*keys_list).Get(arena, i);
        // Struct field names must be strings; fails for unsupported key types.
        auto status = KeyAsString(map_key, &key);
        if (!status.ok()) {
          return status;
        }
        // If the lookup unexpectedly fails, export a default CelValue.
        auto map_value_ref = (*cel_map).Get(arena, map_key);
        CelValue map_value =
            (map_value_ref) ? map_value_ref.value() : CelValue();
        status = ExportAsProtoValue(map_value, &((*out_values)[key]), arena);
        if (!status.ok()) {
          return status;
        }
      }
      break;
    }
    default: {
      return absl::InvalidArgumentError("Unsupported value type");
    }
  }
  return absl::OkStatus();
}
} | #include "eval/public/value_export_util.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google::api::expr::runtime {
namespace {
using google::protobuf::Duration;
using google::protobuf::ListValue;
using google::protobuf::Struct;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::Arena;
// --- Scalar CelValue -> google.protobuf.Value conversions ---
TEST(ValueExportUtilTest, ConvertBoolValue) {
  CelValue cel_value = CelValue::CreateBool(true);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kBoolValue);
  EXPECT_EQ(value.bool_value(), true);
}
// Integers are widened to double (number_value).
TEST(ValueExportUtilTest, ConvertInt64Value) {
  CelValue cel_value = CelValue::CreateInt64(-1);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
  EXPECT_DOUBLE_EQ(value.number_value(), -1);
}
TEST(ValueExportUtilTest, ConvertUint64Value) {
  CelValue cel_value = CelValue::CreateUint64(1);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
  EXPECT_DOUBLE_EQ(value.number_value(), 1);
}
TEST(ValueExportUtilTest, ConvertDoubleValue) {
  CelValue cel_value = CelValue::CreateDouble(1.3);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kNumberValue);
  EXPECT_DOUBLE_EQ(value.number_value(), 1.3);
}
TEST(ValueExportUtilTest, ConvertStringValue) {
  std::string test = "test";
  CelValue cel_value = CelValue::CreateString(&test);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
  EXPECT_EQ(value.string_value(), "test");
}
// Bytes are exported as base64 text ("dGVzdA==" == b64("test")).
TEST(ValueExportUtilTest, ConvertBytesValue) {
  std::string test = "test";
  CelValue cel_value = CelValue::CreateBytes(&test);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
  EXPECT_EQ(value.string_value(), "dGVzdA==");
}
// Durations are exported in proto3 JSON text form.
TEST(ValueExportUtilTest, ConvertDurationValue) {
  Duration duration;
  duration.set_seconds(2);
  duration.set_nanos(3);
  CelValue cel_value = CelProtoWrapper::CreateDuration(&duration);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
  EXPECT_EQ(value.string_value(), "2.000000003s");
}
// Timestamps are exported in RFC 3339 text form.
TEST(ValueExportUtilTest, ConvertTimestampValue) {
  Timestamp timestamp;
  timestamp.set_seconds(1000000000);
  timestamp.set_nanos(3);
  CelValue cel_value = CelProtoWrapper::CreateTimestamp(&timestamp);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStringValue);
  EXPECT_EQ(value.string_value(), "2001-09-09T01:46:40.000000003Z");
}
// --- Well-known message types round-trip through the JSON representation ---
TEST(ValueExportUtilTest, ConvertStructMessage) {
  Struct struct_msg;
  (*struct_msg.mutable_fields())["string_value"].set_string_value("test");
  Arena arena;
  CelValue cel_value = CelProtoWrapper::CreateMessage(&struct_msg, &arena);
  Value value;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  EXPECT_THAT(value.struct_value(), testutil::EqualsProto(struct_msg));
}
// A Value message exports to an equal Value.
TEST(ValueExportUtilTest, ConvertValueMessage) {
  Value value_in;
  (*value_in.mutable_struct_value()->mutable_fields())["boolean_value"]
      .set_bool_value(true);
  Arena arena;
  CelValue cel_value = CelProtoWrapper::CreateMessage(&value_in, &arena);
  Value value_out;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value_out));
  EXPECT_THAT(value_in, testutil::EqualsProto(value_out));
}
// A ListValue message exports as the list_value kind.
TEST(ValueExportUtilTest, ConvertListValueMessage) {
  ListValue list_value;
  list_value.add_values()->set_string_value("test");
  list_value.add_values()->set_bool_value(true);
  Arena arena;
  CelValue cel_value = CelProtoWrapper::CreateMessage(&list_value, &arena);
  Value value_out;
  EXPECT_OK(ExportAsProtoValue(cel_value, &value_out));
  EXPECT_THAT(list_value, testutil::EqualsProto(value_out.list_value()));
}
// --- Repeated message fields export as Struct fields holding list values ---
TEST(ValueExportUtilTest, ConvertRepeatedBoolValue) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_bool_list(true);
  msg->add_bool_list(false);
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("bool_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_EQ(list_value.list_value().values(0).bool_value(), true);
  EXPECT_EQ(list_value.list_value().values(1).bool_value(), false);
}
// 32-bit integers fit in a JSON number.
TEST(ValueExportUtilTest, ConvertRepeatedInt32Value) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_int32_list(2);
  msg->add_int32_list(3);
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("int32_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_DOUBLE_EQ(list_value.list_value().values(0).number_value(), 2);
  EXPECT_DOUBLE_EQ(list_value.list_value().values(1).number_value(), 3);
}
// Proto3 JSON renders 64-bit integers as strings, hence string_value here.
TEST(ValueExportUtilTest, ConvertRepeatedInt64Value) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_int64_list(2);
  msg->add_int64_list(3);
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("int64_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_EQ(list_value.list_value().values(0).string_value(), "2");
  EXPECT_EQ(list_value.list_value().values(1).string_value(), "3");
}
// uint64 likewise serializes as a JSON string.
TEST(ValueExportUtilTest, ConvertRepeatedUint64Value) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_uint64_list(2);
  msg->add_uint64_list(3);
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("uint64_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_EQ(list_value.list_value().values(0).string_value(), "2");
  EXPECT_EQ(list_value.list_value().values(1).string_value(), "3");
}
TEST(ValueExportUtilTest, ConvertRepeatedDoubleValue) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_double_list(2);
  msg->add_double_list(3);
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("double_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_DOUBLE_EQ(list_value.list_value().values(0).number_value(), 2);
  EXPECT_DOUBLE_EQ(list_value.list_value().values(1).number_value(), 3);
}
TEST(ValueExportUtilTest, ConvertRepeatedStringValue) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_string_list("test1");
  msg->add_string_list("test2");
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("string_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_EQ(list_value.list_value().values(0).string_value(), "test1");
  EXPECT_EQ(list_value.list_value().values(1).string_value(), "test2");
}
// Bytes elements are base64-encoded strings.
TEST(ValueExportUtilTest, ConvertRepeatedBytesValue) {
  Arena arena;
  Value value;
  TestMessage* msg = Arena::Create<TestMessage>(&arena);
  msg->add_bytes_list("test1");
  msg->add_bytes_list("test2");
  CelValue cel_value = CelProtoWrapper::CreateMessage(msg, &arena);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  Value list_value = value.struct_value().fields().at("bytes_list");
  EXPECT_TRUE(list_value.has_list_value());
  EXPECT_EQ(list_value.list_value().values(0).string_value(), "dGVzdDE=");
  EXPECT_EQ(list_value.list_value().values(1).string_value(), "dGVzdDI=");
}
// --- CelList and CelMap containers export to list_value / struct_value ---
TEST(ValueExportUtilTest, ConvertCelList) {
  Arena arena;
  Value value;
  std::vector<CelValue> values;
  values.push_back(CelValue::CreateInt64(2));
  values.push_back(CelValue::CreateInt64(3));
  CelList *cel_list = Arena::Create<ContainerBackedListImpl>(&arena, values);
  CelValue cel_value = CelValue::CreateList(cel_list);
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kListValue);
  EXPECT_DOUBLE_EQ(value.list_value().values(0).number_value(), 2);
  EXPECT_DOUBLE_EQ(value.list_value().values(1).number_value(), 3);
}
TEST(ValueExportUtilTest, ConvertCelMapWithStringKey) {
  Value value;
  std::vector<std::pair<CelValue, CelValue>> map_entries;
  std::string key1 = "key1";
  std::string key2 = "key2";
  std::string value1 = "value1";
  std::string value2 = "value2";
  map_entries.push_back(
      {CelValue::CreateString(&key1), CelValue::CreateString(&value1)});
  map_entries.push_back(
      {CelValue::CreateString(&key2), CelValue::CreateString(&value2)});
  auto cel_map = CreateContainerBackedMap(
                     absl::Span<std::pair<CelValue, CelValue>>(map_entries))
                     .value();
  CelValue cel_value = CelValue::CreateMap(cel_map.get());
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  const auto& fields = value.struct_value().fields();
  EXPECT_EQ(fields.at(key1).string_value(), value1);
  EXPECT_EQ(fields.at(key2).string_value(), value2);
}
// Integer keys are stringified (via KeyAsString) to serve as field names.
TEST(ValueExportUtilTest, ConvertCelMapWithInt64Key) {
  Value value;
  std::vector<std::pair<CelValue, CelValue>> map_entries;
  int key1 = -1;
  int key2 = 2;
  std::string value1 = "value1";
  std::string value2 = "value2";
  map_entries.push_back(
      {CelValue::CreateInt64(key1), CelValue::CreateString(&value1)});
  map_entries.push_back(
      {CelValue::CreateInt64(key2), CelValue::CreateString(&value2)});
  auto cel_map = CreateContainerBackedMap(
                     absl::Span<std::pair<CelValue, CelValue>>(map_entries))
                     .value();
  CelValue cel_value = CelValue::CreateMap(cel_map.get());
  EXPECT_OK(ExportAsProtoValue(cel_value, &value));
  EXPECT_EQ(value.kind_case(), Value::KindCase::kStructValue);
  const auto& fields = value.struct_value().fields();
  EXPECT_EQ(fields.at(absl::StrCat(key1)).string_value(), value1);
  EXPECT_EQ(fields.at(absl::StrCat(key2)).string_value(), value2);
}
}
} |
37 | #ifndef TENSORFLOW_TSL_LIB_IO_INPUTBUFFER_H_
#define TENSORFLOW_TSL_LIB_IO_INPUTBUFFER_H_
#include <string>
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/status.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace io {
// Buffered sequential reader over a RandomAccessFile. Reads ahead in chunks
// of `buffer_bytes` to amortize file accesses. Not owned: the file must
// outlive this object (the destructor frees only the internal buffer).
class InputBuffer {
 public:
  // Creates a buffer reading from "file" with an internal buffer of
  // "buffer_bytes" bytes. Does not take ownership of "file".
  InputBuffer(RandomAccessFile* file, size_t buffer_bytes);
  ~InputBuffer();

  // Reads one text line into *result (T is std::string or tstring),
  // excluding the '\n' terminator and any trailing '\r'.
  template <typename T>
  absl::Status ReadLine(T* result);

  // Reads bytes_to_read bytes into *result, shrinking it on a short read.
  absl::Status ReadNBytes(int64_t bytes_to_read, std::string* result);

  // Reads bytes_to_read bytes into the caller-provided buffer `result`;
  // *bytes_read receives the count actually read.
  absl::Status ReadNBytes(int64_t bytes_to_read, char* result,
                          size_t* bytes_read);

  // Reads a single varint-encoded integer.
  absl::Status ReadVarint32(uint32* result);
  absl::Status ReadVarint64(uint64* result);

  // Advances / repositions the read cursor. (Definitions not visible in this
  // chunk; semantics per their names — confirm against the implementation.)
  absl::Status SkipNBytes(int64_t bytes_to_skip);
  absl::Status Seek(int64_t position);
  absl::Status Hint(int64_t bytes_to_read);

  // Current logical offset: bytes consumed by the caller so far
  // (file position minus what is still buffered but unread).
  int64_t Tell() const { return file_pos_ - (limit_ - pos_); }

  // The underlying (non-owned) file.
  RandomAccessFile* file() const { return file_; }

 private:
  // Refills buf_ from file_ at file_pos_.
  absl::Status FillBuffer();
  // Slow paths for varints that may straddle a buffer refill.
  absl::Status ReadVarint32Fallback(uint32* result);
  absl::Status ReadVarint64Fallback(uint64* result);
  template <typename T>
  absl::Status ReadVarintFallback(T* result, int max_bytes);

  RandomAccessFile* file_;  // not owned
  int64_t file_pos_;        // next file offset to read into buf_
  size_t size_;             // capacity of buf_
  char* buf_;               // owned buffer of size_ bytes
  char* pos_;               // next unconsumed byte in buf_
  char* limit_;             // one past the last valid byte in buf_

  InputBuffer(const InputBuffer&) = delete;
  void operator=(const InputBuffer&) = delete;
};
extern template Status InputBuffer::ReadLine<std::string>(std::string* result);
extern template Status InputBuffer::ReadLine<tstring>(tstring* result);
inline absl::Status InputBuffer::ReadVarint32(uint32* result) {
  // Fast path: the buffer holds at least a maximal-length varint32, so the
  // core parser can run without worrying about a refill.
  if (limit_ - pos_ >= core::kMaxVarint32Bytes) {
    const char* next = core::GetVarint32Ptr(pos_, limit_, result);
    if (next == nullptr) {
      return errors::OutOfRange("Parsed past limit.");
    }
    pos_ = const_cast<char*>(next);
    return absl::OkStatus();
  }
  // Slow path: the varint may straddle a buffer refill.
  return ReadVarint32Fallback(result);
}
inline absl::Status InputBuffer::ReadVarint64(uint64* result) {
  // Fast path: the buffer holds at least a maximal-length varint64.
  if (limit_ - pos_ >= core::kMaxVarint64Bytes) {
    const char* next = core::GetVarint64Ptr(pos_, limit_, result);
    if (next == nullptr) {
      return errors::OutOfRange("Parsed past limit.");
    }
    pos_ = const_cast<char*>(next);
    return absl::OkStatus();
  }
  // Slow path: the varint may straddle a buffer refill.
  return ReadVarint64Fallback(result);
}
}
}
#endif
#include "tsl/lib/io/inputbuffer.h"
#include <algorithm>
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace io {
// Constructs a buffer over "file" with an internal buffer of "buffer_bytes"
// bytes. "file" is not owned and must outlive this object.
InputBuffer::InputBuffer(RandomAccessFile* file, size_t buffer_bytes)
    : file_(file),
      file_pos_(0),          // start reading at the beginning of the file
      size_(buffer_bytes),
      buf_(new char[size_]),
      pos_(buf_),            // pos_ == limit_ means the buffer is empty
      limit_(buf_) {}

// Frees the internal buffer only; the file stays with its owner.
InputBuffer::~InputBuffer() { delete[] buf_; }
// Reads the next chunk of the file into buf_ and resets pos_/limit_.
// Returns the read's status (OutOfRange at EOF); even on a short or failed
// read, pos_/limit_/file_pos_ reflect whatever data was obtained.
absl::Status InputBuffer::FillBuffer() {
  StringPiece data;
  absl::Status s = file_->Read(file_pos_, size_, &data, buf_);
  // Read may return a slice pointing at storage other than buf_ (e.g. an
  // mmapped region); copy it so pos_/limit_ always reference our buffer.
  if (data.data() != buf_) {
    memmove(buf_, data.data(), data.size());
  }
  pos_ = buf_;
  limit_ = pos_ + data.size();
  file_pos_ += data.size();
  return s;
}
// Reads one '\n'-terminated line into *result (newline not included).
// A trailing '\r' (CRLF line ending) is stripped. At end of file, a final
// unterminated line is still returned with OkStatus; a read at EOF with no
// data yields the underlying OutOfRange status.
template <typename T>
absl::Status InputBuffer::ReadLine(T* result) {
  result->clear();
  absl::Status s;
  do {
    size_t buf_remain = limit_ - pos_;
    char* newline = static_cast<char*>(memchr(pos_, '\n', buf_remain));
    if (newline != nullptr) {
      // Found the terminator within the buffered data: copy up to it and
      // consume the '\n' itself.
      size_t result_len = newline - pos_;
      result->append(pos_, result_len);
      pos_ = newline + 1;
      if (!result->empty() && result->back() == '\r') {
        result->resize(result->size() - 1);
      }
      return absl::OkStatus();
    }
    // No newline in the buffer: take everything buffered and refill.
    if (buf_remain > 0) result->append(pos_, buf_remain);
    s = FillBuffer();
    DCHECK_EQ(pos_, buf_);
  } while (limit_ != buf_);  // loop until FillBuffer yields no bytes
  if (!result->empty() && result->back() == '\r') {
    result->resize(result->size() - 1);
  }
  // A non-empty final line without a trailing newline is not an error.
  if (errors::IsOutOfRange(s) && !result->empty()) {
    return absl::OkStatus();
  }
  return s;
}
template Status InputBuffer::ReadLine<std::string>(std::string* result);
template Status InputBuffer::ReadLine<tstring>(tstring* result);
// Reads `bytes_to_read` bytes into *result, shrinking it to the number of
// bytes actually read when the read falls short (EOF or error).
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read,
                                     std::string* result) {
  result->clear();
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Can't read a negative number of bytes: ",
                                   bytes_to_read);
  }
  // Size the destination up front so the char* overload writes in place.
  result->resize(bytes_to_read);
  size_t actually_read = 0;
  absl::Status status =
      ReadNBytes(bytes_to_read, &(*result)[0], &actually_read);
  // Trim the tail that was never filled.
  if (actually_read < bytes_to_read) {
    result->resize(actually_read);
  }
  return status;
}
// Reads exactly `bytes_to_read` bytes into `result`, refilling the buffer
// as needed. *bytes_read reports how many bytes were actually copied; on a
// short read the underlying (typically OutOfRange) status is returned.
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read, char* result,
                                     size_t* bytes_read) {
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Can't read a negative number of bytes: ",
                                   bytes_to_read);
  }
  absl::Status status;
  *bytes_read = 0;
  while (*bytes_read < static_cast<size_t>(bytes_to_read)) {
    if (pos_ == limit_) {
      // Buffer exhausted: refill. If nothing was read, stop and let the
      // status (e.g. OutOfRange at EOF) propagate.
      status = FillBuffer();
      if (limit_ == buf_) {
        break;
      }
    }
    // Copy as much as the buffer holds, capped at what is still needed.
    const int64_t bytes_to_copy =
        std::min<int64_t>(limit_ - pos_, bytes_to_read - *bytes_read);
    memcpy(result + *bytes_read, pos_, bytes_to_copy);
    pos_ += bytes_to_copy;
    *bytes_read += bytes_to_copy;
  }
  // If the request was fully satisfied, suppress an OutOfRange raised by a
  // FillBuffer that merely touched end-of-file.
  if (errors::IsOutOfRange(status) &&
      (*bytes_read == static_cast<size_t>(bytes_to_read))) {
    return absl::OkStatus();
  }
  return status;
}
// Slow path for ReadVarint32: decodes one byte at a time (possibly
// refilling the buffer), rewording the generic overflow error for varint32.
absl::Status InputBuffer::ReadVarint32Fallback(uint32* result) {
  const absl::Status status =
      ReadVarintFallback(result, core::kMaxVarint32Bytes);
  return errors::IsDataLoss(status)
             ? errors::DataLoss("Stored data is too large to be a varint32.")
             : status;
}
// Slow path for ReadVarint64: decodes one byte at a time (possibly
// refilling the buffer), rewording the generic overflow error for varint64.
absl::Status InputBuffer::ReadVarint64Fallback(uint64* result) {
  const absl::Status status =
      ReadVarintFallback(result, core::kMaxVarint64Bytes);
  return errors::IsDataLoss(status)
             ? errors::DataLoss("Stored data is too large to be a varint64.")
             : status;
}
// Generic base-128 varint decoder used by both fallbacks. Reads up to
// `max_bytes` bytes, accumulating 7 payload bits per byte (low bits first);
// a byte with the continuation bit (0x80) clear terminates the value.
// Returns DataLoss if the encoding runs past `max_bytes`.
template <typename T>
absl::Status InputBuffer::ReadVarintFallback(T* result, int max_bytes) {
  uint8 scratch = 0;
  auto* p = reinterpret_cast<char*>(&scratch);
  size_t unused_bytes_read = 0;
  *result = 0;
  for (int index = 0; index < max_bytes; index++) {
    int shift = 7 * index;
    // One byte at a time so buffer refills happen transparently.
    TF_RETURN_IF_ERROR(ReadNBytes(1, p, &unused_bytes_read));
    *result |= (static_cast<T>(scratch) & 127) << shift;
    if (!(scratch & 128)) return absl::OkStatus();
  }
  return errors::DataLoss("Stored data longer than ", max_bytes, " bytes.");
}
// Advances the read position by `bytes_to_skip` bytes (forward only),
// consuming buffered data first and refilling as needed. Skipping past
// end-of-file returns the underlying OutOfRange status.
absl::Status InputBuffer::SkipNBytes(int64_t bytes_to_skip) {
  if (bytes_to_skip < 0) {
    return errors::InvalidArgument("Can only skip forward, not ",
                                   bytes_to_skip);
  }
  int64_t bytes_skipped = 0;
  absl::Status s;
  while (bytes_skipped < bytes_to_skip) {
    if (pos_ == limit_) {
      // Buffer exhausted: refill; stop if the file produced nothing.
      s = FillBuffer();
      if (limit_ == buf_) {
        break;
      }
    }
    const int64_t bytes_to_advance =
        std::min<int64_t>(limit_ - pos_, bytes_to_skip - bytes_skipped);
    bytes_skipped += bytes_to_advance;
    pos_ += bytes_to_advance;
  }
  // Fully satisfied skip swallows an OutOfRange that merely touched EOF.
  if (errors::IsOutOfRange(s) && bytes_skipped == bytes_to_skip) {
    return absl::OkStatus();
  }
  return s;
}
// Moves the read position to absolute file offset `position`. If the target
// lies inside the currently buffered range, just repoint pos_; otherwise
// invalidate the buffer so the next read fills from `position`.
absl::Status InputBuffer::Seek(int64_t position) {
  if (position < 0) {
    return errors::InvalidArgument("Seeking to a negative position: ",
                                   position);
  }
  // File offset corresponding to buf_[0].
  const int64_t bufpos = file_pos_ - static_cast<int64_t>(limit_ - buf_);
  if (position >= bufpos && position < file_pos_) {
    // Target is already buffered: no I/O needed.
    pos_ = buf_ + (position - bufpos);
    DCHECK(pos_ >= buf_ && pos_ < limit_);
  } else {
    // Drop the buffer; FillBuffer will read from the new offset.
    pos_ = limit_ = buf_;
    file_pos_ = position;
  }
  return absl::OkStatus();
}
// Best-effort prefetch: tries to ensure at least `bytes_to_read` bytes are
// buffered ahead of the current position. Requests larger than the buffer,
// or already satisfied by buffered data, are no-ops returning OkStatus.
absl::Status InputBuffer::Hint(int64_t bytes_to_read) {
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Can't read a negative number of bytes: ",
                                   bytes_to_read);
  }
  // The hint cannot be honored if it exceeds the buffer capacity; it is
  // only a hint, so silently succeed.
  if (bytes_to_read > size_) {
    return absl::OkStatus();
  }
  const int64_t bytes_remain_in_buf = static_cast<int64_t>(limit_ - pos_);
  if (bytes_to_read <= bytes_remain_in_buf) {
    return absl::OkStatus();
  }
  // Slide the unread remainder to the front to make room, then top up the
  // buffer with a single read.
  memmove(buf_, pos_, bytes_remain_in_buf);
  pos_ = buf_;
  limit_ = buf_ + bytes_remain_in_buf;
  bytes_to_read -= bytes_remain_in_buf;
  StringPiece data;
  absl::Status s = file_->Read(file_pos_, bytes_to_read, &data, limit_);
  if (data.data() != limit_) {
    memmove(limit_, data.data(), data.size());
  }
  limit_ += data.size();
  file_pos_ += data.size();
  // OutOfRange is fine as long as the requested bytes were obtained.
  if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) {
    return absl::OkStatus();
  } else {
    return s;
  }
}
}
} | #include "tsl/lib/io/inputbuffer.h"
#include <vector>
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
// Buffer sizes exercised by every test: all small sizes 1..20 (refill-heavy
// regime, every boundary crossing) plus one large size (single-read regime).
static std::vector<int> BufferSizes() {
  std::vector<int> sizes;
  sizes.reserve(21);
  for (int sz = 1; sz <= 20; ++sz) {
    sizes.push_back(sz);
  }
  sizes.push_back(65536);
  return sizes;
}
// Reading a line from an empty file immediately reports OutOfRange.
TEST(InputBuffer, ReadLine_Empty) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string line;
    io::InputBuffer in(file.get(), buf_size);
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// Basic case: newline-terminated lines come back without the '\n', and
// reads past the last line keep returning OutOfRange.
TEST(InputBuffer, ReadLine1) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_CHECK_OK(
      WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string line;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// A final line without a trailing '\n' is still returned successfully.
TEST(InputBuffer, ReadLine_NoTrailingNewLine) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string line;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// Consecutive newlines produce empty lines, not skipped lines.
TEST(InputBuffer, ReadLine_EmptyLines) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_CHECK_OK(
      WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string line;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// CRLF line endings: the trailing '\r' is stripped from each line.
TEST(InputBuffer, ReadLine_CRLF) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname,
                                 "line one\r\n\r\n\r\nline two\r\nline three"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string line;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line one");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line two");
    TF_CHECK_OK(in.ReadLine(&line));
    EXPECT_EQ(line, "line three");
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
  }
}
// Exercises both ReadNBytes overloads: Tell() tracking, zero-byte reads,
// short reads at EOF (partial result + OutOfRange), and reads after EOF.
TEST(InputBuffer, ReadNBytes) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  // std::string overload.
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string read;
    io::InputBuffer in(file.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    EXPECT_EQ(3, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(3, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    EXPECT_EQ(7, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(7, in.Tell());
    // Short read at EOF: partial data is delivered with OutOfRange.
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "789");
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(0, &read));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
  // Raw char* overload: note zero-byte reads leave the buffer untouched.
  size_t bytes_read;
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    char read[5];
    io::InputBuffer in(file.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(3, read, &bytes_read));
    EXPECT_EQ(StringPiece(read, 3), "012");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
    EXPECT_EQ(StringPiece(read, 3), "012");
    EXPECT_EQ(3, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(4, read, &bytes_read));
    EXPECT_EQ(StringPiece(read, 4), "3456");
    EXPECT_EQ(7, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
    EXPECT_EQ(StringPiece(read, 4), "3456");
    EXPECT_EQ(7, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
    EXPECT_EQ(StringPiece(read, 3), "789");
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
    EXPECT_EQ(StringPiece(read, 3), "789");
    EXPECT_EQ(10, in.Tell());
    TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
    EXPECT_EQ(StringPiece(read, 3), "789");
    EXPECT_EQ(10, in.Tell());
  }
}
// SkipNBytes interleaved with reads: Tell() advances on skips, zero-byte
// skips are no-ops, and skipping past EOF returns OutOfRange.
TEST(InputBuffer, SkipNBytes) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string read;
    io::InputBuffer in(file.get(), buf_size);
    EXPECT_EQ(0, in.Tell());
    TF_CHECK_OK(in.SkipNBytes(3));
    EXPECT_EQ(3, in.Tell());
    TF_CHECK_OK(in.SkipNBytes(0));
    EXPECT_EQ(3, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(2, &read));
    EXPECT_EQ(read, "34");
    EXPECT_EQ(5, in.Tell());
    TF_CHECK_OK(in.SkipNBytes(0));
    EXPECT_EQ(5, in.Tell());
    TF_CHECK_OK(in.SkipNBytes(2));
    EXPECT_EQ(7, in.Tell());
    TF_CHECK_OK(in.ReadNBytes(1, &read));
    EXPECT_EQ(read, "7");
    EXPECT_EQ(8, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
    EXPECT_EQ(10, in.Tell());
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
    EXPECT_EQ(read, "");
    EXPECT_EQ(10, in.Tell());
  }
}
// Seek within and outside the buffered window, past EOF, and to a negative
// position (which must be rejected as InvalidArgument).
TEST(InputBuffer, Seek) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string read;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "345");
    TF_CHECK_OK(in.Seek(0));
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    TF_CHECK_OK(in.Seek(3));
    TF_CHECK_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    TF_CHECK_OK(in.Seek(4));
    TF_CHECK_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "4567");
    TF_CHECK_OK(in.Seek(1 << 25));
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(1, &read)));
    EXPECT_TRUE(absl::StrContains(in.Seek(-1).ToString(), "negative position"));
  }
}
// Round-trips a dense sweep of varint32 values (including the maximum)
// through PutVarint32/ReadVarint32 at every buffer size, so both the fast
// in-buffer path and the byte-at-a-time fallback are covered.
TEST(InputBuffer, ReadVarint32) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  std::vector<uint32> data;
  uint32 i = 0;
  for (; i < (1U << 10); i += 1) data.push_back(i);
  for (; i < (1U << 15); i += 5) data.push_back(i);
  for (; i < (1U << 31); i += 132817) data.push_back(i);
  data.push_back(std::numeric_limits<uint32>::max());
  {
    std::unique_ptr<WritableFile> file;
    TF_CHECK_OK(env->NewWritableFile(fname, &file));
    string varint;
    for (uint32 number : data) {
      varint.clear();
      core::PutVarint32(&varint, number);
      TF_CHECK_OK(file->Append(StringPiece(varint)));
    }
  }
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    io::InputBuffer in(file.get(), buf_size);
    uint32 result = 0;
    for (uint32 expected : data) {
      TF_ASSERT_OK(in.ReadVarint32(&result));
      EXPECT_EQ(expected, result);
    }
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint32(&result)));
  }
}
// Same round-trip sweep for varint64, extended into the 64-bit range.
TEST(InputBuffer, ReadVarint64) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  std::vector<uint64> data;
  uint64 i = 0;
  for (; i < (1U << 10); i += 1) data.push_back(i);
  for (; i < (1U << 15); i += 5) data.push_back(i);
  for (; i < (1U << 31); i += 164817) data.push_back(i);
  for (; i < (1ULL << 63); i += 16481797854795663UL) data.push_back(i);
  data.push_back(std::numeric_limits<uint64>::max());
  {
    std::unique_ptr<WritableFile> file;
    TF_CHECK_OK(env->NewWritableFile(fname, &file));
    string varint;
    for (uint64 number : data) {
      varint.clear();
      core::PutVarint64(&varint, number);
      TF_CHECK_OK(file->Append(StringPiece(varint)));
    }
  }
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    io::InputBuffer in(file.get(), buf_size);
    uint64 result = 0;
    for (uint64 expected : data) {
      TF_ASSERT_OK(in.ReadVarint64(&result));
      EXPECT_EQ(expected, result);
    }
    EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint64(&result)));
  }
}
// Hint() must never change observable read results: reads after hints of
// various sizes (including oversized hints and hints at EOF) still return
// the same bytes, and a negative hint is InvalidArgument.
TEST(InputBuffer, Hint) {
  Env* env = Env::Default();
  string fname;
  ASSERT_TRUE(env->LocalTempFilename(&fname));
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  for (auto buf_size : BufferSizes()) {
    std::unique_ptr<RandomAccessFile> file;
    TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
    string read;
    io::InputBuffer in(file.get(), buf_size);
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    TF_CHECK_OK(in.Hint(4));
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "345");
    TF_CHECK_OK(in.Hint(1));
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "678");
    TF_CHECK_OK(in.Seek(0));
    TF_CHECK_OK(in.Hint(7));
    TF_CHECK_OK(in.ReadNBytes(3, &read));
    EXPECT_EQ(read, "012");
    TF_CHECK_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "3456");
    TF_CHECK_OK(in.Hint(2));
    TF_CHECK_OK(in.Seek(4));
    TF_CHECK_OK(in.ReadNBytes(4, &read));
    EXPECT_EQ(read, "4567");
    TF_CHECK_OK(in.Seek(0));
    TF_CHECK_OK(in.Hint(1 << 25));
    TF_CHECK_OK(in.Seek(1 << 25));
    EXPECT_TRUE(errors::IsOutOfRange(in.Hint(1)));
    EXPECT_TRUE(errors::IsInvalidArgument(in.Hint(-1)));
  }
}
}
} |
38 | #ifndef ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_
#define ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_
#include <string>
#include "absl/base/config.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
std::string ProgramInvocationName();
std::string ShortProgramInvocationName();
void SetProgramInvocationName(absl::string_view prog_name_str);
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/flags/internal/program_name.h"
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/internal/path_util.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
// Guards all access to `program_name` below.
ABSL_CONST_INIT static absl::Mutex program_name_guard(absl::kConstInit);
// Heap-allocated and intentionally leaked (safe to read during shutdown);
// remains nullptr until SetProgramInvocationName() is first called.
ABSL_CONST_INIT static std::string* program_name
    ABSL_GUARDED_BY(program_name_guard) = nullptr;
// Returns the full program invocation name, or "UNKNOWN" if it was never
// set via SetProgramInvocationName().
std::string ProgramInvocationName() {
  absl::MutexLock l(&program_name_guard);
  if (program_name == nullptr) return "UNKNOWN";
  return *program_name;
}
// Returns just the basename of the program invocation name, or "UNKNOWN"
// if it was never set.
std::string ShortProgramInvocationName() {
  absl::MutexLock l(&program_name_guard);
  if (program_name == nullptr) return "UNKNOWN";
  return std::string(flags_internal::Basename(*program_name));
}
// Stores `prog_name_str` as the program invocation name. The backing
// std::string is allocated once and reused on subsequent calls; assign()
// is used because the view is not required to be NUL-terminated.
void SetProgramInvocationName(absl::string_view prog_name_str) {
  absl::MutexLock l(&program_name_guard);
  if (program_name == nullptr) {
    program_name = new std::string(prog_name_str);
  } else {
    program_name->assign(prog_name_str.data(), prog_name_str.size());
  }
}
}
ABSL_NAMESPACE_END
} | #include "absl/flags/internal/program_name.h"
#include <string>
#include "gtest/gtest.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
namespace {
namespace flags = absl::flags_internal;
// Covers set/get round-trips for the full and short program names,
// including a non-NUL-terminated string_view input.
TEST(FlagsPathUtilTest, TestProgamNameInterfaces) {
  flags::SetProgramInvocationName("absl/flags/program_name_test");
  std::string program_name = flags::ProgramInvocationName();
  // Normalize Windows path separators before comparing.
  for (char& c : program_name)
    if (c == '\\') c = '/';
#if !defined(__wasm__) && !defined(__asmjs__)
  const std::string expect_name = "absl/flags/program_name_test";
  const std::string expect_basename = "program_name_test";
#else
  // Emscripten-style targets report a fixed placeholder name.
  const std::string expect_name = "this.program";
  const std::string expect_basename = "this.program";
#endif
  EXPECT_TRUE(absl::EndsWith(program_name, expect_name)) << program_name;
  EXPECT_EQ(flags::ShortProgramInvocationName(), expect_basename);
  flags::SetProgramInvocationName("a/my_test");
  EXPECT_EQ(flags::ProgramInvocationName(), "a/my_test");
  EXPECT_EQ(flags::ShortProgramInvocationName(), "my_test");
  // substr() produces a view into the middle of the literal, so it is not
  // NUL-terminated — the setter must honor the view's length.
  absl::string_view not_null_terminated("absl/aaa/bbb");
  not_null_terminated = not_null_terminated.substr(1, 10);
  flags::SetProgramInvocationName(not_null_terminated);
  EXPECT_EQ(flags::ProgramInvocationName(), "bsl/aaa/bb");
  EXPECT_EQ(flags::ShortProgramInvocationName(), "bb");
}
} |
39 | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CEL_FUNCTION_REGISTRY_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_CEL_FUNCTION_REGISTRY_H_
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "base/kind.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "runtime/function_overload_reference.h"
#include "runtime/function_registry.h"
namespace google::api::expr::runtime {
// Legacy-facing function registry. All state lives in the modern
// cel::FunctionRegistry; this class adapts legacy CelFunction-based call
// sites to it (see FindOverloads in the .cc for the proxy cache).
class CelFunctionRegistry {
 public:
  using LazyOverload = cel::FunctionRegistry::LazyOverload;
  CelFunctionRegistry() = default;
  ~CelFunctionRegistry() = default;
  // Signature for bulk registration helpers used with RegisterAll().
  using Registrar = absl::Status (*)(CelFunctionRegistry*,
                                     const InterpreterOptions&);
  // Registers a legacy function; the descriptor is copied before the
  // function object is moved into the modern registry.
  absl::Status Register(std::unique_ptr<CelFunction> function) {
    auto descriptor = function->descriptor();
    return Register(descriptor, std::move(function));
  }
  // Registers a modern function implementation under `descriptor`.
  absl::Status Register(const cel::FunctionDescriptor& descriptor,
                        std::unique_ptr<cel::Function> implementation) {
    return modern_registry_.Register(descriptor, std::move(implementation));
  }
  // Runs each registrar in order, stopping at the first error.
  absl::Status RegisterAll(std::initializer_list<Registrar> registrars,
                           const InterpreterOptions& opts);
  // Declares a function whose implementation is supplied later via an
  // Activation at evaluation time.
  absl::Status RegisterLazyFunction(const CelFunctionDescriptor& descriptor) {
    return modern_registry_.RegisterLazyFunction(descriptor);
  }
  // Legacy lookup: returns lazily-created CelFunction proxies (see .cc).
  std::vector<const CelFunction*> FindOverloads(
      absl::string_view name, bool receiver_style,
      const std::vector<CelValue::Type>& types) const;
  // Modern lookup: forwards straight to the underlying registry.
  std::vector<cel::FunctionOverloadReference> FindStaticOverloads(
      absl::string_view name, bool receiver_style,
      const std::vector<cel::Kind>& types) const {
    return modern_registry_.FindStaticOverloads(name, receiver_style, types);
  }
  // Legacy lazy lookup: descriptors only.
  std::vector<const CelFunctionDescriptor*> FindLazyOverloads(
      absl::string_view name, bool receiver_style,
      const std::vector<CelValue::Type>& types) const;
  // Modern lazy lookup: descriptor plus provider.
  std::vector<LazyOverload> ModernFindLazyOverloads(
      absl::string_view name, bool receiver_style,
      const std::vector<CelValue::Type>& types) const {
    return modern_registry_.FindLazyOverloads(name, receiver_style, types);
  }
  // Returns all registered (static and lazy) descriptors keyed by name.
  absl::node_hash_map<std::string, std::vector<const cel::FunctionDescriptor*>>
  ListFunctions() const {
    return modern_registry_.ListFunctions();
  }
  // Escape hatches for code migrating to the modern registry.
  const cel::FunctionRegistry& InternalGetRegistry() const {
    return modern_registry_;
  }
  cel::FunctionRegistry& InternalGetRegistry() { return modern_registry_; }
 private:
  cel::FunctionRegistry modern_registry_;
  // Cache of legacy proxy wrappers, keyed by the modern implementation so
  // repeated FindOverloads calls return stable pointers. Mutable because
  // the cache is populated from const lookups.
  mutable absl::Mutex mu_;
  mutable absl::flat_hash_map<const cel::Function*,
                              std::unique_ptr<CelFunction>>
      functions_ ABSL_GUARDED_BY(mu_);
};
}
#endif
#include "eval/public/cel_function_registry.h"
#include <algorithm>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "base/type_provider.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/internal/interop.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_overload_reference.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::extensions::ProtoMemoryManagerRef;
// Adapter exposing a modern cel::Function through the legacy CelFunction
// interface: converts arguments to modern values, invokes, and converts the
// result back. The wrapped implementation must outlive the proxy (it is
// owned by the registry).
class ProxyToModernCelFunction : public CelFunction {
 public:
  ProxyToModernCelFunction(const cel::FunctionDescriptor& descriptor,
                           const cel::Function& implementation)
      : CelFunction(descriptor), implementation_(&implementation) {}
  absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
                        google::protobuf::Arena* arena) const override {
    // Bridge the caller's arena into the modern memory-management API.
    auto memory_manager = ProtoMemoryManagerRef(arena);
    cel::common_internal::LegacyValueManager manager(
        memory_manager, cel::TypeProvider::Builtin());
    cel::FunctionEvaluationContext context(manager);
    std::vector<cel::Value> modern_args =
        cel::interop_internal::LegacyValueToModernValueOrDie(arena, args);
    CEL_ASSIGN_OR_RETURN(auto modern_result,
                         implementation_->Invoke(context, modern_args));
    *result = cel::interop_internal::ModernValueToLegacyValueOrDie(
        arena, modern_result);
    return absl::OkStatus();
  }
 private:
  // Not owned; lifetime managed by the owning registry.
  const cel::Function* implementation_;
};
}
// Invokes each registrar against this registry with the supplied options,
// stopping and returning at the first failure.
absl::Status CelFunctionRegistry::RegisterAll(
    std::initializer_list<Registrar> registrars,
    const InterpreterOptions& opts) {
  for (const Registrar& add_functions : registrars) {
    CEL_RETURN_IF_ERROR(add_functions(this, opts));
  }
  return absl::OkStatus();
}
// Legacy lookup: resolves matching modern overloads, then returns legacy
// CelFunction proxies for them. Proxies are created on first use and cached
// in functions_ (keyed by the modern implementation) so the returned
// pointers stay stable across calls.
std::vector<const CelFunction*> CelFunctionRegistry::FindOverloads(
    absl::string_view name, bool receiver_style,
    const std::vector<CelValue::Type>& types) const {
  std::vector<cel::FunctionOverloadReference> matched_funcs =
      modern_registry_.FindStaticOverloads(name, receiver_style, types);
  std::vector<const CelFunction*> results;
  results.reserve(matched_funcs.size());
  {
    // Lock only around cache access; the registry lookup above is
    // independently thread-safe.
    absl::MutexLock lock(&mu_);
    for (cel::FunctionOverloadReference entry : matched_funcs) {
      // operator[] inserts a null entry on first lookup, which we then
      // populate with a freshly built proxy.
      std::unique_ptr<CelFunction>& legacy_impl =
          functions_[&entry.implementation];
      if (legacy_impl == nullptr) {
        legacy_impl = std::make_unique<ProxyToModernCelFunction>(
            entry.descriptor, entry.implementation);
      }
      results.push_back(legacy_impl.get());
    }
  }
  return results;
}
// Legacy lazy lookup: returns only the descriptors of matching lazily-bound
// overloads. Descriptor pointers refer into the modern registry and remain
// valid for its lifetime.
std::vector<const CelFunctionDescriptor*>
CelFunctionRegistry::FindLazyOverloads(
    absl::string_view name, bool receiver_style,
    const std::vector<CelValue::Type>& types) const {
  const std::vector<LazyOverload> matches =
      modern_registry_.FindLazyOverloads(name, receiver_style, types);
  std::vector<const CelFunctionDescriptor*> descriptors;
  descriptors.reserve(matches.size());
  for (const LazyOverload& match : matches) {
    descriptors.push_back(&match.descriptor);
  }
  return descriptors;
}
} | #include "eval/public/cel_function_registry.h"
#include <memory>
#include <tuple>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/kind.h"
#include "eval/internal/adapter_activation_impl.h"
#include "eval/public/activation.h"
#include "eval/public/cel_function.h"
#include "internal/testing.h"
#include "runtime/function_overload_reference.h"
namespace google::api::expr::runtime {
namespace {
using testing::ElementsAre;
using testing::Eq;
using testing::HasSubstr;
using testing::Property;
using testing::SizeIs;
using testing::Truly;
using cel::internal::StatusIs;
// Trivial test function: zero-argument "ConstFunction" that always returns
// int64 42, used as a registrable stand-in throughout these tests.
class ConstCelFunction : public CelFunction {
 public:
  ConstCelFunction() : CelFunction(MakeDescriptor()) {}
  // Allows registering the same implementation under arbitrary descriptors.
  explicit ConstCelFunction(const CelFunctionDescriptor& desc)
      : CelFunction(desc) {}
  static CelFunctionDescriptor MakeDescriptor() {
    return {"ConstFunction", false, {}};
  }
  absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* output,
                        google::protobuf::Arena* arena) const override {
    *output = CelValue::CreateInt64(42);
    return absl::OkStatus();
  }
};
// A registered lazy function is discoverable through FindLazyOverloads.
TEST(CelFunctionRegistryTest, InsertAndRetrieveLazyFunction) {
  CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
  CelFunctionRegistry registry;
  Activation activation;
  ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
  const auto descriptors =
      registry.FindLazyOverloads("LazyFunction", false, {});
  EXPECT_THAT(descriptors, testing::SizeIs(1));
}
// Lazy and static registrations collide on identical descriptors.
TEST(CelFunctionRegistryTest, LazyAndStaticFunctionShareDescriptorSpace) {
  CelFunctionRegistry registry;
  CelFunctionDescriptor desc = ConstCelFunction::MakeDescriptor();
  ASSERT_OK(registry.RegisterLazyFunction(desc));
  absl::Status status = registry.Register(ConstCelFunction::MakeDescriptor(),
                                          std::make_unique<ConstCelFunction>());
  EXPECT_FALSE(status.ok());
}
// A static registration is visible through the modern lookup path.
TEST(CelFunctionRegistryTest, FindStaticOverloadsReturns) {
  CelFunctionRegistry registry;
  CelFunctionDescriptor desc = ConstCelFunction::MakeDescriptor();
  ASSERT_OK(registry.Register(desc, std::make_unique<ConstCelFunction>(desc)));
  std::vector<cel::FunctionOverloadReference> overloads =
      registry.FindStaticOverloads(desc.name(), false, {});
  EXPECT_THAT(overloads,
              ElementsAre(Truly(
                  [](const cel::FunctionOverloadReference& overload) -> bool {
                    return overload.descriptor.name() == "ConstFunction";
                  })))
      << "Expected single ConstFunction()";
}
// ListFunctions reports both lazy and static registrations.
TEST(CelFunctionRegistryTest, ListFunctions) {
  CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
  CelFunctionRegistry registry;
  ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
  EXPECT_OK(registry.Register(ConstCelFunction::MakeDescriptor(),
                              std::make_unique<ConstCelFunction>()));
  auto registered_functions = registry.ListFunctions();
  EXPECT_THAT(registered_functions, SizeIs(2));
  EXPECT_THAT(registered_functions["LazyFunction"], SizeIs(1));
  EXPECT_THAT(registered_functions["ConstFunction"], SizeIs(1));
}
// The legacy lazy lookup only reports lazy overloads, not static ones.
TEST(CelFunctionRegistryTest, LegacyFindLazyOverloads) {
  CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
  CelFunctionRegistry registry;
  ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
  ASSERT_OK(registry.Register(ConstCelFunction::MakeDescriptor(),
                              std::make_unique<ConstCelFunction>()));
  EXPECT_THAT(registry.FindLazyOverloads("LazyFunction", false, {}),
              ElementsAre(Truly([](const CelFunctionDescriptor* descriptor) {
                return descriptor->name() == "LazyFunction";
              })))
      << "Expected single lazy overload for LazyFunction()";
}
// The default lazy provider resolves the implementation supplied through
// the Activation at evaluation time.
TEST(CelFunctionRegistryTest, DefaultLazyProvider) {
  CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
  CelFunctionRegistry registry;
  Activation activation;
  cel::interop_internal::AdapterActivationImpl modern_activation(activation);
  EXPECT_OK(registry.RegisterLazyFunction(lazy_function_desc));
  EXPECT_OK(activation.InsertFunction(
      std::make_unique<ConstCelFunction>(lazy_function_desc)));
  auto providers = registry.ModernFindLazyOverloads("LazyFunction", false, {});
  EXPECT_THAT(providers, testing::SizeIs(1));
  ASSERT_OK_AND_ASSIGN(auto func, providers[0].provider.GetFunction(
                                      lazy_function_desc, modern_activation));
  ASSERT_TRUE(func.has_value());
  EXPECT_THAT(func->descriptor,
              Property(&cel::FunctionDescriptor::name, Eq("LazyFunction")));
}
// When no activation-supplied overload matches the requested signature the
// provider returns an empty optional (not an error).
TEST(CelFunctionRegistryTest, DefaultLazyProviderNoOverloadFound) {
  CelFunctionRegistry registry;
  Activation legacy_activation;
  cel::interop_internal::AdapterActivationImpl activation(legacy_activation);
  CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
  EXPECT_OK(registry.RegisterLazyFunction(lazy_function_desc));
  EXPECT_OK(legacy_activation.InsertFunction(
      std::make_unique<ConstCelFunction>(lazy_function_desc)));
  const auto providers =
      registry.ModernFindLazyOverloads("LazyFunction", false, {});
  ASSERT_THAT(providers, testing::SizeIs(1));
  const auto& provider = providers[0].provider;
  auto func = provider.GetFunction({"LazyFunc", false, {cel::Kind::kInt64}},
                                   activation);
  ASSERT_OK(func.status());
  EXPECT_EQ(*func, absl::nullopt);
}
// Two activation-supplied overloads both matching a kAny request is an
// ambiguity and must be reported as an error.
TEST(CelFunctionRegistryTest, DefaultLazyProviderAmbiguousLookup) {
  CelFunctionRegistry registry;
  Activation legacy_activation;
  cel::interop_internal::AdapterActivationImpl activation(legacy_activation);
  CelFunctionDescriptor desc1{"LazyFunc", false, {CelValue::Type::kInt64}};
  CelFunctionDescriptor desc2{"LazyFunc", false, {CelValue::Type::kUint64}};
  CelFunctionDescriptor match_desc{"LazyFunc", false, {CelValue::Type::kAny}};
  ASSERT_OK(registry.RegisterLazyFunction(match_desc));
  ASSERT_OK(legacy_activation.InsertFunction(
      std::make_unique<ConstCelFunction>(desc1)));
  ASSERT_OK(legacy_activation.InsertFunction(
      std::make_unique<ConstCelFunction>(desc2)));
  auto providers =
      registry.ModernFindLazyOverloads("LazyFunc", false, {cel::Kind::kAny});
  ASSERT_THAT(providers, testing::SizeIs(1));
  const auto& provider = providers[0].provider;
  auto func = provider.GetFunction(match_desc, activation);
  EXPECT_THAT(std::string(func.status().message()),
              HasSubstr("Couldn't resolve function"));
}
// Non-strict functions can be registered both statically and lazily.
TEST(CelFunctionRegistryTest, CanRegisterNonStrictFunction) {
  {
    CelFunctionRegistry registry;
    CelFunctionDescriptor descriptor("NonStrictFunction",
                                     false,
                                     {CelValue::Type::kAny},
                                     false);
    ASSERT_OK(registry.Register(
        descriptor, std::make_unique<ConstCelFunction>(descriptor)));
    EXPECT_THAT(registry.FindStaticOverloads("NonStrictFunction", false,
                                             {CelValue::Type::kAny}),
                SizeIs(1));
  }
  {
    CelFunctionRegistry registry;
    CelFunctionDescriptor descriptor("NonStrictLazyFunction",
                                     false,
                                     {CelValue::Type::kAny},
                                     false);
    EXPECT_OK(registry.RegisterLazyFunction(descriptor));
    EXPECT_THAT(registry.FindLazyOverloads("NonStrictLazyFunction", false,
                                           {CelValue::Type::kAny}),
                SizeIs(1));
  }
}
using NonStrictTestCase = std::tuple<bool, bool>;
using NonStrictRegistrationFailTest = testing::TestWithParam<NonStrictTestCase>;
// Registering a non-strict overload when any other overload (lazy or
// static, per the test parameters) already exists must fail: non-strict
// functions may not participate in overload sets.
TEST_P(NonStrictRegistrationFailTest,
       IfOtherOverloadExistsRegisteringNonStrictFails) {
  bool existing_function_is_lazy, new_function_is_lazy;
  std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
  CelFunctionRegistry registry;
  // Existing overload is strict.
  CelFunctionDescriptor descriptor("OverloadedFunction",
                                   false,
                                   {CelValue::Type::kAny},
                                   true);
  if (existing_function_is_lazy) {
    ASSERT_OK(registry.RegisterLazyFunction(descriptor));
  } else {
    ASSERT_OK(registry.Register(
        descriptor, std::make_unique<ConstCelFunction>(descriptor)));
  }
  // New overload is non-strict, so the registration must be rejected.
  CelFunctionDescriptor new_descriptor(
      "OverloadedFunction",
      false, {CelValue::Type::kAny, CelValue::Type::kAny},
      false);
  absl::Status status;
  if (new_function_is_lazy) {
    status = registry.RegisterLazyFunction(new_descriptor);
  } else {
    status = registry.Register(
        new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
  }
  EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists,
                               HasSubstr("Only one overload")));
}
TEST_P(NonStrictRegistrationFailTest,
IfOtherNonStrictExistsRegisteringStrictFails) {
bool existing_function_is_lazy, new_function_is_lazy;
std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("OverloadedFunction",
false,
{CelValue::Type::kAny},
false);
if (existing_function_is_lazy) {
ASSERT_OK(registry.RegisterLazyFunction(descriptor));
} else {
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
}
CelFunctionDescriptor new_descriptor(
"OverloadedFunction",
false, {CelValue::Type::kAny, CelValue::Type::kAny},
true);
absl::Status status;
if (new_function_is_lazy) {
status = registry.RegisterLazyFunction(new_descriptor);
} else {
status = registry.Register(
new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
}
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("Only one overload")));
}
TEST_P(NonStrictRegistrationFailTest, CanRegisterStrictFunctionsWithoutLimit) {
bool existing_function_is_lazy, new_function_is_lazy;
std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("OverloadedFunction",
false,
{CelValue::Type::kAny},
true);
if (existing_function_is_lazy) {
ASSERT_OK(registry.RegisterLazyFunction(descriptor));
} else {
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
}
CelFunctionDescriptor new_descriptor(
"OverloadedFunction",
false, {CelValue::Type::kAny, CelValue::Type::kAny},
true);
absl::Status status;
if (new_function_is_lazy) {
status = registry.RegisterLazyFunction(new_descriptor);
} else {
status = registry.Register(
new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
}
EXPECT_OK(status);
}
INSTANTIATE_TEST_SUITE_P(NonStrictRegistrationFailTest,
NonStrictRegistrationFailTest,
testing::Combine(testing::Bool(), testing::Bool()));
}
} |
40 | #ifndef ABSL_SYNCHRONIZATION_NOTIFICATION_H_
#define ABSL_SYNCHRONIZATION_NOTIFICATION_H_
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// A one-shot event: threads block in WaitForNotification*() until some other
// thread calls Notify() exactly once. Query without blocking via
// HasBeenNotified(). Not copyable.
class Notification {
 public:
  Notification() : notified_yet_(false) {}
  // When `prenotify` is true the object starts out already notified.
  explicit Notification(bool prenotify) : notified_yet_(prenotify) {}
  Notification(const Notification&) = delete;
  Notification& operator=(const Notification&) = delete;
  ~Notification();

  // Returns true once Notify() has been called; never blocks.
  ABSL_MUST_USE_RESULT bool HasBeenNotified() const {
    return HasBeenNotifiedInternal(&this->notified_yet_);
  }

  // Blocks until Notify() is called (returns immediately if it already was).
  void WaitForNotification() const;
  // Timed variants: give up after `timeout` / at `deadline`; return whether
  // the notification happened in time.
  bool WaitForNotificationWithTimeout(absl::Duration timeout) const;
  bool WaitForNotificationWithDeadline(absl::Time deadline) const;

  // Wakes all current and future waiters. Must be called at most once
  // (checked fatally in debug builds — see the implementation).
  void Notify();

 private:
  // Acquire-load; pairs with the release-store performed by Notify().
  static inline bool HasBeenNotifiedInternal(
      const std::atomic<bool>* notified_yet) {
    return notified_yet->load(std::memory_order_acquire);
  }

  mutable Mutex mutex_;
  std::atomic<bool> notified_yet_;
};
ABSL_NAMESPACE_END
}
#endif
#include "absl/synchronization/notification.h"
#include <atomic>
#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
void Notification::Notify() {
  // The lock serializes Notify() with the destructor and with waiters
  // evaluating their Condition inside the Mutex.
  MutexLock l(&this->mutex_);
#ifndef NDEBUG
  // Debug-only guard: calling Notify() more than once is a usage error.
  // A relaxed load suffices here because mutex_ is already held.
  if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
    ABSL_RAW_LOG(
        FATAL,
        "Notify() method called more than once for Notification object %p",
        static_cast<void *>(this));
  }
#endif
  // Release-store pairs with the acquire-load in HasBeenNotifiedInternal().
  notified_yet_.store(true, std::memory_order_release);
}
// Acquiring mutex_ here blocks destruction until no thread is still inside
// Notify() (which holds the same mutex), so the object cannot be destroyed
// out from under a concurrent Notify() call.
Notification::~Notification() {
  MutexLock l(&this->mutex_);
}
// Fast path: skip the mutex entirely when the notification already happened;
// otherwise block on the mutex until the condition becomes true.
void Notification::WaitForNotification() const {
  if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
    this->mutex_.LockWhen(Condition(&HasBeenNotifiedInternal,
                                    &this->notified_yet_));
    this->mutex_.Unlock();
  }
}

// Returns true iff the notification arrived before `timeout` elapsed.
bool Notification::WaitForNotificationWithTimeout(
    absl::Duration timeout) const {
  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
  if (!notified) {
    // LockWhenWithTimeout returns whether the condition held on wake-up.
    notified = this->mutex_.LockWhenWithTimeout(
        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
    this->mutex_.Unlock();
  }
  return notified;
}

// Returns true iff the notification arrived before `deadline`.
bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
  if (!notified) {
    notified = this->mutex_.LockWhenWithDeadline(
        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
    this->mutex_.Unlock();
  }
  return notified;
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/notification.h"
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// An int counter whose increment/read/wait operations are all guarded by a
// single mutex.
class ThreadSafeCounter {
 public:
  ThreadSafeCounter() : count_(0) {}

  void Increment() {
    MutexLock lock(&mutex_);
    ++count_;
  }

  int Get() const {
    MutexLock lock(&mutex_);
    return count_;
  }

  // Blocks until count_ >= n. Await atomically releases mutex_ while
  // waiting, so holding the MutexLock here does not deadlock.
  void WaitUntilGreaterOrEqual(int n) {
    MutexLock lock(&mutex_);
    auto cond = [this, n]() { return count_ >= n; };
    mutex_.Await(Condition(&cond));
  }

 private:
  mutable Mutex mutex_;
  int count_;
};

// Worker body: announce readiness, block on the notification, then record
// completion. `i` is unused; it only gives each thread an id.
static void RunWorker(int i, ThreadSafeCounter* ready_counter,
                      Notification* notification,
                      ThreadSafeCounter* done_counter) {
  ready_counter->Increment();
  notification->WaitForNotification();
  done_counter->Increment();
}

// Exercises the full Notification API with ten waiter threads, either
// notifying before the workers start or only after all are confirmed waiting.
static void BasicTests(bool notify_before_waiting, Notification* notification) {
  EXPECT_FALSE(notification->HasBeenNotified());
  EXPECT_FALSE(
      notification->WaitForNotificationWithTimeout(absl::Milliseconds(0)));
  EXPECT_FALSE(notification->WaitForNotificationWithDeadline(absl::Now()));
  // The timed wait must not return noticeably earlier than its timeout.
  const absl::Duration delay = absl::Milliseconds(50);
  const absl::Time start = absl::Now();
  EXPECT_FALSE(notification->WaitForNotificationWithTimeout(delay));
  const absl::Duration elapsed = absl::Now() - start;
  // Small scheduling slop to keep the test robust on loaded machines.
  const absl::Duration slop = absl::Milliseconds(5);
  EXPECT_LE(delay - slop, elapsed)
      << "WaitForNotificationWithTimeout returned " << delay - elapsed
      << " early (with " << slop << " slop), start time was " << start;
  ThreadSafeCounter ready_counter;
  ThreadSafeCounter done_counter;
  if (notify_before_waiting) {
    notification->Notify();
  }
  const int kNumThreads = 10;
  std::vector<std::thread> workers;
  for (int i = 0; i < kNumThreads; ++i) {
    workers.push_back(std::thread(&RunWorker, i, &ready_counter, notification,
                                  &done_counter));
  }
  if (!notify_before_waiting) {
    // All workers are blocked (none done) until Notify() releases them.
    ready_counter.WaitUntilGreaterOrEqual(kNumThreads);
    EXPECT_EQ(0, done_counter.Get());
    notification->Notify();
  }
  // After Notify(), every wait flavor returns immediately with success.
  notification->WaitForNotification();
  EXPECT_TRUE(notification->HasBeenNotified());
  EXPECT_TRUE(notification->WaitForNotificationWithTimeout(absl::Seconds(0)));
  EXPECT_TRUE(notification->WaitForNotificationWithDeadline(absl::Now()));
  for (std::thread& worker : workers) {
    worker.join();
  }
  EXPECT_EQ(kNumThreads, ready_counter.Get());
  EXPECT_EQ(kNumThreads, done_counter.Get());
}

TEST(NotificationTest, SanityTest) {
  Notification local_notification1, local_notification2;
  BasicTests(false, &local_notification1);
  BasicTests(true, &local_notification2);
}
ABSL_NAMESPACE_END
} |
41 | #ifndef TENSORFLOW_CORE_API_DEF_UPDATE_API_DEF_H_
#define TENSORFLOW_CORE_API_DEF_UPDATE_API_DEF_H_
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// Renders `op` as an ApiDefs text proto (multi-line description fields).
string CreateApiDef(const OpDef& op);
// Strips the .Doc(R"doc(...)doc") call for `op` from `file_contents`,
// searching from `start_location`; returns the contents unchanged on failure.
string RemoveDoc(const OpDef& op, const string& file_contents,
                 size_t start_location);
// Writes missing api_def_*.pbtxt files for `ops` under `api_def_dir`, then
// removes the now-redundant .Doc() calls from files matching
// `op_file_pattern` (if non-empty).
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
                   const string& op_file_pattern);
}
#endif
#include "tensorflow/core/api_def/update_api_def.h"
#include <ctype.h>
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFileFormat[] = "api_def_%s.pbtxt";
constexpr char kDocStart[] = ".Doc(R\"doc(";
constexpr char kDocEnd[] = ")doc\")";
// Seeds `api_def` from `op`: copies the graph op name, the summary and
// description, and a description entry for every input, output, and
// attribute that actually has one (undocumented entries are skipped).
void FillBaseApiDef(ApiDef* api_def, const OpDef& op) {
  api_def->set_graph_op_name(op.name());
  for (const auto& input_arg : op.input_arg()) {
    if (input_arg.description().empty()) continue;
    auto* in_arg = api_def->add_in_arg();
    in_arg->set_name(input_arg.name());
    in_arg->set_description(input_arg.description());
  }
  for (const auto& output_arg : op.output_arg()) {
    if (output_arg.description().empty()) continue;
    auto* out_arg = api_def->add_out_arg();
    out_arg->set_name(output_arg.name());
    out_arg->set_description(output_arg.description());
  }
  for (const auto& attr : op.attr()) {
    if (attr.description().empty()) continue;
    auto* attr_def = api_def->add_attr();
    attr_def->set_name(attr.name());
    attr_def->set_description(attr.description());
  }
  api_def->set_summary(op.summary());
  api_def->set_description(op.description());
}
// True iff `op` carries any human-written documentation: a summary, a
// description, or a description on any input, output, or attribute.
bool OpHasDocs(const OpDef& op) {
  if (!op.summary().empty() || !op.description().empty()) {
    return true;
  }
  const auto documented = [](const auto& field) {
    return !field.description().empty();
  };
  return std::any_of(op.input_arg().begin(), op.input_arg().end(),
                     documented) ||
         std::any_of(op.output_arg().begin(), op.output_arg().end(),
                     documented) ||
         std::any_of(op.attr().begin(), op.attr().end(), documented);
}
// True iff op1 and op2 carry identical documentation: same summary and
// description, the same counts of inputs/outputs/attrs, and pairwise equal
// per-entry descriptions. Used to validate a parsed doc against the op's
// registered documentation before deleting it.
bool CheckDocsMatch(const OpDef& op1, const OpDef& op2) {
  if (op1.summary() != op2.summary() ||
      op1.description() != op2.description() ||
      op1.input_arg_size() != op2.input_arg_size() ||
      op1.output_arg_size() != op2.output_arg_size() ||
      op1.attr_size() != op2.attr_size()) {
    return false;
  }
  for (int i = 0; i < op1.input_arg_size(); ++i) {
    if (op1.input_arg(i).description() != op2.input_arg(i).description()) {
      return false;
    }
  }
  for (int i = 0; i < op1.output_arg_size(); ++i) {
    if (op1.output_arg(i).description() != op2.output_arg(i).description()) {
      return false;
    }
  }
  for (int i = 0; i < op1.attr_size(); ++i) {
    if (op1.attr(i).description() != op2.attr(i).description()) {
      return false;
    }
  }
  return true;
}
// Re-parses `doc` through OpDefBuilder — using dummy ":string" signatures
// that only mirror `op`'s input/output/attr names — and checks that the
// resulting documentation matches `op`'s. Guards RemoveDoc() against
// deleting text that is not actually this op's doc.
bool ValidateOpDocs(const OpDef& op, const string& doc) {
  OpDefBuilder b(op.name());
  // The declared types are irrelevant; only names and descriptions matter.
  for (const auto& arg : op.input_arg()) {
    b.Input(arg.name() + ":string");
  }
  for (const auto& arg : op.output_arg()) {
    b.Output(arg.name() + ":string");
  }
  for (const auto& attr : op.attr()) {
    b.Attr(attr.name() + ":string");
  }
  b.Doc(doc);
  OpRegistrationData op_reg_data;
  TF_CHECK_OK(b.Finalize(&op_reg_data));
  return CheckDocsMatch(op, op_reg_data.op_def);
}
}
// Removes the .Doc(R"doc(...)doc") call that follows `start_location` in
// `file_contents`, provided its text matches `op`'s registered docs.
// Returns the updated contents, or the original contents unchanged when the
// doc cannot be found or does not validate (an error is logged either way).
string RemoveDoc(const OpDef& op, const string& file_contents,
                 size_t start_location) {
  const auto doc_start_location = file_contents.find(kDocStart, start_location);
  const string format_error = strings::Printf(
      "Could not find %s doc for removal. Make sure the doc is defined with "
      "'%s' prefix and '%s' suffix or remove the doc manually.",
      op.name().c_str(), kDocStart, kDocEnd);
  if (doc_start_location == string::npos) {
    std::cerr << format_error << std::endl;
    LOG(ERROR) << "Didn't find doc start";
    return file_contents;
  }
  const auto doc_end_location = file_contents.find(kDocEnd, doc_start_location);
  if (doc_end_location == string::npos) {
    // BUG FIX: this branch used to log "Didn't find doc start" (copy/paste
    // error); it fires when the closing )doc") marker is missing.
    std::cerr << format_error << std::endl;
    LOG(ERROR) << "Didn't find doc end";
    return file_contents;
  }
  // Extract the text between the prefix and the suffix: the doc body itself.
  const auto doc_start_size = sizeof(kDocStart) - 1;
  string doc_text = file_contents.substr(
      doc_start_location + doc_start_size,
      doc_end_location - doc_start_location - doc_start_size);
  // Only delete the doc if it round-trips to the same documentation we
  // expect for `op`; otherwise leave the file untouched.
  if (!ValidateOpDocs(op, doc_text)) {
    LOG(ERROR) << "Invalid doc: " << doc_text;
    std::cerr << format_error << std::endl;
    return file_contents;
  }
  // Splice the file back together without the doc call, trimming the
  // whitespace that preceded ".Doc(".
  auto before_doc = file_contents.substr(0, doc_start_location);
  absl::StripTrailingAsciiWhitespace(&before_doc);
  return before_doc +
         file_contents.substr(doc_end_location + sizeof(kDocEnd) - 1);
}
namespace {
// Strips the .Doc() call for each op in `ops` from the first file in
// `op_files` containing its REGISTER_OP, rewriting those files in place.
void RemoveDocs(const std::vector<const OpDef*>& ops,
                const std::vector<string>& op_files) {
  // Each op is processed at most once, even if registered in several files.
  std::set<string> processed_ops;
  for (const auto& file : op_files) {
    string file_contents;
    bool file_contents_updated = false;
    TF_CHECK_OK(ReadFileToString(Env::Default(), file, &file_contents));
    for (auto op : ops) {
      if (processed_ops.find(op->name()) != processed_ops.end()) {
        continue;
      }
      string register_call =
          strings::Printf("REGISTER_OP(\"%s\")", op->name().c_str());
      const auto register_call_location = file_contents.find(register_call);
      if (register_call_location == string::npos) {
        continue;
      }
      std::cout << "Removing .Doc call for " << op->name() << " from " << file
                << "." << std::endl;
      file_contents = RemoveDoc(*op, file_contents, register_call_location);
      file_contents_updated = true;
      processed_ops.insert(op->name());
    }
    // Only touch the file on disk when something was actually removed.
    if (file_contents_updated) {
      TF_CHECK_OK(WriteStringToFile(Env::Default(), file, file_contents))
          << "Could not remove .Doc calls in " << file
          << ". Make sure the file is writable.";
    }
  }
}
}
// Renders `op` as an ApiDefs text proto, with multi-line "description"
// fields emitted in <<END ... END heredoc form.
string CreateApiDef(const OpDef& op) {
  ApiDefs api_defs;
  FillBaseApiDef(api_defs.add_op(), op);
  const std::vector<string> multi_line_fields = {"description"};
  std::string new_api_defs_str;
  ::tensorflow::protobuf::TextFormat::PrintToString(api_defs,
                                                    &new_api_defs_str);
  return PBTxtToMultiline(new_api_defs_str, multi_line_fields);
}
// For every non-excluded op without an existing api_def_<op>.pbtxt under
// `api_def_dir`, writes a fresh one; then, if `op_file_pattern` is
// non-empty, strips the ops' now-redundant .Doc() calls from the matching
// source files.
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
                   const string& op_file_pattern) {
  auto* excluded_ops = GetExcludedOps();
  std::vector<const OpDef*> new_ops_with_docs;
  for (const auto& op : ops.op()) {
    if (excluded_ops->find(op.name()) != excluded_ops->end()) {
      continue;
    }
    // Expand the %s in kApiDefFileFormat with the op name.
    string file_path =
        io::JoinPath(tensorflow::string(api_def_dir), kApiDefFileFormat);
    file_path = strings::Printf(file_path.c_str(), op.name().c_str());
    // Only create files that do not exist yet; existing ones are kept as-is.
    if (!Env::Default()->FileExists(file_path).ok()) {
      std::cout << "Creating ApiDef file " << file_path << std::endl;
      const auto& api_def_text = CreateApiDef(op);
      TF_CHECK_OK(WriteStringToFile(Env::Default(), file_path, api_def_text));
      if (OpHasDocs(op)) {
        new_ops_with_docs.push_back(&op);
      }
    }
  }
  if (!op_file_pattern.empty()) {
    std::vector<string> op_files;
    TF_CHECK_OK(Env::Default()->GetMatchingPaths(op_file_pattern, &op_files));
    RemoveDocs(new_ops_with_docs, op_files);
  }
}
} | #include "tensorflow/core/api_def/update_api_def.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// RemoveDoc() strips a .Doc(R"doc(...)doc") call whose text matches the
// op's registered documentation, leaving the rest of the registration
// intact.
TEST(UpdateApiDefTest, TestRemoveDocSingleOp) {
  const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
  const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op1.
Description
for Op1.
b : Description for b.
a: Description for a.
output: Description for output.
)doc");
)opdef";
  const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
  OpDef op;
  protobuf::TextFormat::ParseFromString(op_text, &op);
  EXPECT_EQ(op_def_text,
            RemoveDoc(op, op_def_text_with_doc, 0 /* start_location */));
}

// Docs can be removed op-by-op, in any order, whether the .Doc() call sits
// in the middle or at the end of the registration chain.
TEST(UpdateApiDefTest, TestRemoveDocMultipleOps) {
  const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
  const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Doc(R"doc(
Summary for Op1.
)doc")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op2.
)doc");
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op3.
)doc");
)opdef";
  const string op1_text = R"(
name: "Op1"
input_arg {
name: "a"
}
summary: "Summary for Op1."
)";
  const string op2_text = R"(
name: "Op2"
input_arg {
name: "a"
}
summary: "Summary for Op2."
)";
  const string op3_text = R"(
name: "Op3"
input_arg {
name: "c"
}
summary: "Summary for Op3."
)";
  OpDef op1, op2, op3;
  protobuf::TextFormat::ParseFromString(op1_text, &op1);
  protobuf::TextFormat::ParseFromString(op2_text, &op2);
  protobuf::TextFormat::ParseFromString(op3_text, &op3);
  // Remove Op2's doc first; the other ops' docs must survive.
  string updated_text =
      RemoveDoc(op2, op_def_text_with_doc,
                op_def_text_with_doc.find("Op2") /* start_location */);
  EXPECT_EQ(string::npos, updated_text.find("Summary for Op2"));
  EXPECT_NE(string::npos, updated_text.find("Summary for Op1"));
  EXPECT_NE(string::npos, updated_text.find("Summary for Op3"));
  updated_text = RemoveDoc(op3, updated_text,
                           updated_text.find("Op3") /* start_location */);
  updated_text = RemoveDoc(op1, updated_text,
                           updated_text.find("Op1") /* start_location */);
  EXPECT_EQ(op_def_text, updated_text);
}

// CreateApiDef() emits the ApiDefs text proto with every description field
// in <<END ... END heredoc form.
TEST(UpdateApiDefTest, TestCreateApiDef) {
  const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
  OpDef op;
  protobuf::TextFormat::ParseFromString(op_text, &op);
  const string expected_api_def = R"(op {
graph_op_name: "Op1"
in_arg {
name: "a"
description: <<END
Description for a.
END
}
out_arg {
name: "output"
description: <<END
Description for output.
END
}
attr {
name: "b"
description: <<END
Description for b.
END
}
summary: "Summary for Op1."
description: <<END
Description
for Op1.
END
}
)";
  EXPECT_EQ(expected_api_def, CreateApiDef(op));
}
}
} |
42 | #ifndef TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_
#define TENSORFLOW_LITE_SIMPLE_MEMORY_ARENA_H_
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
// Bookkeeping for one tensor's buffer inside the arena: its byte range
// [offset, offset + size) and the node interval [first_node, last_node]
// during which the tensor must stay live.
struct ArenaAllocWithUsageInterval {
  ArenaAllocWithUsageInterval() { reset(); }

  size_t offset;       // Byte offset of the allocation within the arena.
  size_t size;         // Allocation size in bytes (0 is a valid empty alloc).
  int32_t tensor;      // Index of the owning tensor; -1 when unassigned.
  int32_t first_node;  // First node (inclusive) that uses the tensor.
  int32_t last_node;   // Last node (inclusive) that uses the tensor.

  // Returns the record to its unassigned state.
  inline void reset() {
    offset = 0;
    size = 0;
    tensor = -1;
    first_node = -1;
    last_node = -1;
  }

  // Orders allocations by their offset inside the arena.
  inline bool operator<(const ArenaAllocWithUsageInterval& other) const {
    return offset < other.offset;
  }
};

// A raw heap pointer plus the address inside it that satisfies the requested
// alignment (the two coincide when the allocator aligns natively).
struct PointerAlignedPointerPair {
  char* pointer;
  char* aligned_pointer;
};
// A heap buffer that grows on demand and always exposes a pointer aligned to
// the alignment given at construction. Not copyable or movable.
class ResizableAlignedBuffer {
 public:
  ResizableAlignedBuffer(size_t alignment, int subgraph_index)
      : buffer_{nullptr, nullptr},
        data_size_(0),
        alignment_(alignment),
        subgraph_index_(subgraph_index) {
    // subgraph_index_ is only consumed by the TF_LITE_TENSORFLOW_PROFILER
    // build; silence the unused-member warning otherwise.
    (void)subgraph_index_;
  }
  ~ResizableAlignedBuffer() { Release(); }

  // Grows the buffer to `new_size` bytes (never shrinks). Returns true when
  // the aligned base pointer changed, i.e. old pointers are invalidated.
  bool Resize(size_t new_size);
  // Frees the buffer and resets its size to zero.
  void Release();

  char* GetPtr() const { return buffer_.aligned_pointer; }
  size_t GetSize() const { return data_size_; }
  size_t GetAlignment() const { return alignment_; }

 private:
  ResizableAlignedBuffer(const ResizableAlignedBuffer&) = delete;
  ResizableAlignedBuffer& operator=(const ResizableAlignedBuffer&) = delete;
  ResizableAlignedBuffer(ResizableAlignedBuffer&&) = delete;
  ResizableAlignedBuffer& operator=(ResizableAlignedBuffer&&) = delete;

  PointerAlignedPointerPair buffer_;
  size_t data_size_;
  size_t alignment_;
  int subgraph_index_;
};

// Plans and backs tensor allocations: Allocate() assigns offsets using the
// tensors' usage intervals (tensors with disjoint intervals may share
// bytes), Commit() sizes the underlying buffer, and ResolveAlloc() turns
// offsets into real pointers.
class SimpleMemoryArena {
 public:
  explicit SimpleMemoryArena(size_t arena_alignment, int subgraph_index = 0)
      : committed_(false),
        high_water_mark_(0),
        underlying_buffer_(arena_alignment, subgraph_index),
        active_allocs_() {}

  // Drops all recorded allocations; the backing buffer is untouched.
  void ResetAllocs();
  // Removes allocations whose last_node precedes `node`.
  void PurgeActiveAllocs(int32_t node);
  // Removes allocations whose first_node is after `node`.
  void PurgeAfter(int32_t node);
  // Rebuilds active_allocs_ from `allocs`, keeping only those live at
  // `node`, sorted by offset.
  void CalculateActiveAllocs(
      const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node);

  // Plans `size` bytes for `tensor`, live during [first_node, last_node].
  TfLiteStatus Allocate(TfLiteContext* context, size_t alignment, size_t size,
                        int32_t tensor, int32_t first_node, int32_t last_node,
                        ArenaAllocWithUsageInterval* new_alloc);
  // Resizes the backing buffer to the plan's high-water mark;
  // *arena_reallocated reports whether previously resolved pointers moved.
  TfLiteStatus Commit(bool* arena_reallocated);
  // Translates `alloc` into a pointer inside the committed buffer (nullptr
  // for zero-sized allocations).
  TfLiteStatus ResolveAlloc(TfLiteContext* context,
                            const ArenaAllocWithUsageInterval& alloc,
                            char** output_ptr);
  // Forgets the allocation plan so a new one can be built and committed.
  TfLiteStatus ClearPlan();
  // Frees the backing buffer; the plan's offsets remain recorded.
  TfLiteStatus ReleaseBuffer();

  size_t GetBufferSize() const { return underlying_buffer_.GetSize(); }
  std::intptr_t BasePointer() const {
    return reinterpret_cast<std::intptr_t>(underlying_buffer_.GetPtr());
  }

  // Forwards arena state to the weak DumpArenaInfo() hook for debugging.
  void DumpDebugInfo(const std::string& name,
                     const std::vector<int>& execution_plan) const;

 private:
  bool committed_;
  size_t high_water_mark_;
  ResizableAlignedBuffer underlying_buffer_;
  std::vector<ArenaAllocWithUsageInterval> active_allocs_;
};
}
#endif
#include "tensorflow/lite/simple_memory_arena.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/macros.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif
#if defined(__ANDROID__)
#define TF_LITE_HAS_ALIGNED_ALLOC (__ANDROID_API__ >= 28)
#elif defined(__APPLE__)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif defined(_WIN32)
#define TF_LITE_HAS_ALIGNED_ALLOC 0
#elif __cplusplus >= 201703L || __STDC_VERSION__ >= 201112L
#define TF_LITE_HAS_ALIGNED_ALLOC 1
#endif
namespace {
// Rounds `offset` up to the next multiple of `alignment` (no-op when the
// offset is already aligned). `alignment` must be non-zero.
template <typename T>
T AlignTo(size_t alignment, T offset) {
  const auto misalignment = offset % alignment;
  if (misalignment == 0) {
    return offset;
  }
  return offset + (alignment - misalignment);
}
// Platform-specific aligned heap helpers. On Windows the CRT provides
// natively aligned _aligned_malloc/_aligned_realloc/_aligned_free; elsewhere
// we either use C11/C++17 aligned_alloc (when TF_LITE_HAS_ALIGNED_ALLOC) or
// over-allocate with malloc and align the returned pointer by hand.
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer);
tflite::PointerAlignedPointerPair AlignedRealloc(
    const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
    size_t new_size, size_t alignment);
#if defined(_WIN32)
// Windows: the allocator aligns natively, so pointer == aligned_pointer.
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
  char* pointer = reinterpret_cast<char*>(_aligned_malloc(size, alignment));
  char* aligned_ptr = pointer;
  return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
  _aligned_free(buffer.pointer);
}
tflite::PointerAlignedPointerPair AlignedRealloc(
    const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
    size_t new_size, size_t alignment) {
  char* pointer = reinterpret_cast<char*>(
      _aligned_realloc(old_buffer.pointer, new_size, alignment));
  char* aligned_ptr = pointer;
  return {pointer, aligned_ptr};
}
#else
tflite::PointerAlignedPointerPair AlignedAlloc(size_t size, size_t alignment) {
#if TF_LITE_HAS_ALIGNED_ALLOC
  // aligned_alloc requires the allocation size to be a multiple of the
  // alignment, so round up; the returned pointer is already aligned.
  const size_t allocation_size = AlignTo(alignment, size + alignment - 1);
  char* pointer =
      reinterpret_cast<char*>(::aligned_alloc(alignment, allocation_size));
  char* aligned_ptr = pointer;
#else
  // Over-allocate by alignment-1 bytes and align the pointer manually.
  const size_t allocation_size = size + alignment - 1;
  char* pointer = reinterpret_cast<char*>(std::malloc(allocation_size));
  char* aligned_ptr = reinterpret_cast<char*>(
      AlignTo(alignment, reinterpret_cast<std::uintptr_t>(pointer)));
#endif
#if defined(__clang__)
#if __has_feature(memory_sanitizer)
  // Under MSAN, mark the whole allocation (including padding) initialized to
  // avoid false positives.
  std::memset(pointer, 0, allocation_size);
#endif
#endif
  return {pointer, aligned_ptr};
}
void AlignedFree(const tflite::PointerAlignedPointerPair& buffer) {
  std::free(buffer.pointer);
}
// There is no portable aligned realloc: allocate anew, copy the overlapping
// prefix, then free the old buffer.
tflite::PointerAlignedPointerPair AlignedRealloc(
    const tflite::PointerAlignedPointerPair& old_buffer, size_t old_size,
    size_t new_size, size_t alignment) {
  tflite::PointerAlignedPointerPair new_buffer =
      AlignedAlloc(new_size, alignment);
  if (new_size > 0 && old_size > 0) {
    const size_t copy_amount = std::min(new_size, old_size);
    std::memcpy(new_buffer.aligned_pointer, old_buffer.aligned_pointer,
                copy_amount);
  }
  AlignedFree(old_buffer);
  return new_buffer;
}
#endif
}
namespace tflite {
// Grows the buffer to `new_size` bytes. Never shrinks: requests at or below
// the current size are no-ops returning false. Returns true when the aligned
// data pointer moved, i.e. callers must re-resolve any cached pointers.
bool ResizableAlignedBuffer::Resize(size_t new_size) {
  if (new_size <= data_size_) {
    return false;
  }
#ifdef TF_LITE_TENSORFLOW_PROFILER
  // Report the resize as a paired alloc/dealloc to the profiler, suppressing
  // the heap monitor around the actual realloc.
  PauseHeapMonitoring(true);
  OnTfLiteArenaAlloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
                     new_size);
  if (data_size_ > 0) {
    OnTfLiteArenaDealloc(subgraph_index_,
                         reinterpret_cast<std::uintptr_t>(this), data_size_);
  }
#endif
  auto new_buffer = AlignedRealloc(buffer_, data_size_, new_size, alignment_);
  bool reallocated = (new_buffer.aligned_pointer != buffer_.aligned_pointer);
  buffer_ = new_buffer;
  data_size_ = new_size;
#ifdef TF_LITE_TENSORFLOW_PROFILER
  PauseHeapMonitoring(false);
#endif
  return reallocated;
}

// Frees the underlying allocation; safe to call repeatedly.
void ResizableAlignedBuffer::Release() {
  if (buffer_.pointer == nullptr) {
    return;
  }
#ifdef TF_LITE_TENSORFLOW_PROFILER
  OnTfLiteArenaDealloc(subgraph_index_, reinterpret_cast<std::uintptr_t>(this),
                       data_size_);
#endif
  AlignedFree(buffer_);
  buffer_.pointer = nullptr;
  buffer_.aligned_pointer = nullptr;
  data_size_ = 0;
}
void SimpleMemoryArena::PurgeAfter(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].first_node > node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::PurgeActiveAllocs(int32_t node) {
for (int i = 0; i < active_allocs_.size(); ++i) {
if (active_allocs_[i].last_node < node) {
active_allocs_[i].tensor = -1;
}
}
active_allocs_.erase(
std::remove_if(active_allocs_.begin(), active_allocs_.end(),
[](ArenaAllocWithUsageInterval& alloc) {
return alloc.tensor == -1;
}),
active_allocs_.end());
}
void SimpleMemoryArena::CalculateActiveAllocs(
const std::vector<ArenaAllocWithUsageInterval>& allocs, int32_t node) {
active_allocs_.clear();
for (int i = 0; i < allocs.size(); ++i) {
if (allocs[i].first_node <= node && allocs[i].last_node >= node) {
active_allocs_.push_back(allocs[i]);
}
}
std::sort(active_allocs_.begin(), active_allocs_.end());
}
// Clears the allocation plan; the backing buffer is left untouched.
void SimpleMemoryArena::ResetAllocs() { active_allocs_.clear(); }
// Plans an offset for `size` bytes belonging to `tensor`, which is live
// during [first_node, last_node]. Uses a best-fit scan over the
// offset-sorted active allocations; allocations whose lifetimes do not
// overlap impose no constraint, so their bytes may be reused.
TfLiteStatus SimpleMemoryArena::Allocate(
    TfLiteContext* context, size_t alignment, size_t size, int32_t tensor,
    int32_t first_node, int32_t last_node,
    ArenaAllocWithUsageInterval* new_alloc) {
  TF_LITE_ENSURE(context, alignment <= underlying_buffer_.GetAlignment());
  new_alloc->tensor = tensor;
  new_alloc->first_node = first_node;
  new_alloc->last_node = last_node;
  new_alloc->size = size;
  // Zero-sized allocations are recorded but take no space and are not kept
  // in the active list.
  if (size == 0) {
    new_alloc->offset = 0;
    return kTfLiteOk;
  }
  const size_t kOffsetNotAssigned = std::numeric_limits<size_t>::max();
  size_t best_offset = kOffsetNotAssigned;
  size_t best_offset_fit = kOffsetNotAssigned;
  // Walk the gaps between existing (offset-sorted) allocations.
  size_t current_offset = 0;
  for (const auto& alloc : active_allocs_) {
    // Lifetime-disjoint allocations can share memory with us: skip them.
    if (alloc.last_node < first_node || alloc.first_node > last_node) {
      continue;
    }
    size_t aligned_current_offset = AlignTo(alignment, current_offset);
    if (aligned_current_offset + size <= alloc.offset &&
        alloc.offset - aligned_current_offset < best_offset_fit) {
      best_offset = aligned_current_offset;
      // BUG FIX: the fit must be measured from the *aligned* offset so it
      // matches the comparison above; measuring from `current_offset`
      // overstated the gap and could make best-fit keep a worse gap.
      best_offset_fit = alloc.offset - aligned_current_offset;
    }
    current_offset = std::max(current_offset, alloc.offset + alloc.size);
    // A perfect (zero-slack) fit cannot be beaten; stop early.
    if (best_offset_fit == 0) {
      break;
    }
  }
  // No usable gap: append past the last conflicting allocation.
  if (best_offset == kOffsetNotAssigned) {
    best_offset = AlignTo(alignment, current_offset);
  }
  high_water_mark_ = std::max(high_water_mark_, best_offset + size);
  new_alloc->offset = best_offset;
  // Keep active_allocs_ sorted by offset so the gap scan above stays valid.
  auto insertion_it = std::upper_bound(active_allocs_.begin(),
                                       active_allocs_.end(), *new_alloc);
  active_allocs_.insert(insertion_it, *new_alloc);
  return kTfLiteOk;
}
// Sizes the backing buffer to the plan's high-water mark.
// *arena_reallocated is set when the buffer moved, which invalidates any
// previously resolved pointers.
TfLiteStatus SimpleMemoryArena::Commit(bool* arena_reallocated) {
  *arena_reallocated = underlying_buffer_.Resize(high_water_mark_);
  committed_ = true;
  return kTfLiteOk;
}

// Converts a planned allocation into a concrete pointer. Requires a prior
// Commit(); zero-sized allocations resolve to nullptr.
TfLiteStatus SimpleMemoryArena::ResolveAlloc(
    TfLiteContext* context, const ArenaAllocWithUsageInterval& alloc,
    char** output_ptr) {
  TF_LITE_ENSURE(context, committed_);
  TF_LITE_ENSURE(context, output_ptr != nullptr);
  TF_LITE_ENSURE(context,
                 underlying_buffer_.GetSize() >= (alloc.offset + alloc.size));
  if (alloc.size == 0) {
    *output_ptr = nullptr;
  } else {
    *output_ptr = underlying_buffer_.GetPtr() + alloc.offset;
  }
  return kTfLiteOk;
}

// Discards the allocation plan; the backing buffer keeps its capacity.
TfLiteStatus SimpleMemoryArena::ClearPlan() {
  committed_ = false;
  high_water_mark_ = 0;
  active_allocs_.clear();
  return kTfLiteOk;
}

// Frees the backing buffer; the plan itself is left intact.
TfLiteStatus SimpleMemoryArena::ReleaseBuffer() {
  committed_ = false;
  underlying_buffer_.Release();
  return kTfLiteOk;
}

// Weak no-op hook; a debugging build may supply a strong definition that
// logs the arena layout.
TFLITE_ATTRIBUTE_WEAK void DumpArenaInfo(
    const std::string& name, const std::vector<int>& execution_plan,
    size_t arena_size, const std::vector<ArenaAllocWithUsageInterval>& allocs) {
}

void SimpleMemoryArena::DumpDebugInfo(
    const std::string& name, const std::vector<int>& execution_plan) const {
  tflite::DumpArenaInfo(name, execution_plan, underlying_buffer_.GetSize(),
                        active_allocs_);
}
} | #include "tensorflow/lite/simple_memory_arena.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
// Minimal no-op error reporter so tests can hand the arena a usable
// TfLiteContext.
void ReportError(TfLiteContext* context, const char* format, ...) {}

// Tensors with disjoint usage intervals may share arena offsets: allocs[3]
// reuses offset 0 and allocs[5] reuses 2048 after the earlier tensors'
// intervals have ended.
TEST(SimpleMemoryArenaTest, BasicArenaOperations) {
  TfLiteContext context;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[6];
  arena.Allocate(&context, 32, 2047, 0, 1, 3, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1, 2, 5, &allocs[1]);
  arena.Allocate(&context, 32, 2047, 2, 3, 6, &allocs[2]);
  arena.Allocate(&context, 32, 2047, 3, 5, 6, &allocs[3]);
  arena.Allocate(&context, 32, 1023, 4, 4, 6, &allocs[4]);
  arena.Allocate(&context, 32, 1023, 5, 6, 6, &allocs[5]);
  EXPECT_EQ(allocs[0].offset, 0);
  EXPECT_EQ(allocs[1].offset, 2048);
  EXPECT_EQ(allocs[2].offset, 4096);
  EXPECT_EQ(allocs[3].offset, 0);
  EXPECT_EQ(allocs[4].offset, 6144);
  EXPECT_EQ(allocs[5].offset, 2048);
}

// Zero-sized allocations succeed, take offset 0, and commit an empty plan
// without reallocating the buffer.
TEST(SimpleMemoryArenaTest, BasicZeroAlloc) {
  TfLiteContext context;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval alloc;
  ASSERT_EQ(arena.Allocate(&context, 32, 0, 0, 1, 2, &alloc), kTfLiteOk);
  EXPECT_EQ(alloc.offset, 0);
  EXPECT_EQ(alloc.size, 0);
  char* resolved_ptr = nullptr;
  bool reallocated = false;
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  EXPECT_FALSE(reallocated);
  EXPECT_EQ(resolved_ptr, nullptr);
}

// Zero-sized allocations interleaved with real ones do not consume space
// and do not perturb the offsets of their neighbors.
TEST(SimpleMemoryArenaTest, InterleavedZeroAlloc) {
  TfLiteContext context;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[4];
  ASSERT_EQ(arena.Allocate(&context, 32, 2047, 0, 0, 4, &allocs[0]), kTfLiteOk);
  ASSERT_EQ(arena.Allocate(&context, 32, 0, 1, 1, 2, &allocs[1]), kTfLiteOk);
  ASSERT_EQ(arena.Allocate(&context, 32, 1023, 2, 1, 2, &allocs[2]), kTfLiteOk);
  ASSERT_EQ(arena.Allocate(&context, 32, 2047, 3, 3, 4, &allocs[3]), kTfLiteOk);
  EXPECT_EQ(allocs[0].offset, 0);
  EXPECT_EQ(allocs[1].offset, 0);
  EXPECT_EQ(allocs[2].offset, 2048);
  EXPECT_EQ(allocs[3].offset, 2048);
}

// ClearPlan() keeps the buffer capacity: a smaller follow-up plan commits
// without reallocating, while a larger one triggers a reallocation.
TEST(SimpleMemoryArenaTest, TestClearPlan) {
  TfLiteContext context;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[9];
  arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
  arena.Allocate(&context, 32, 2047, 2, 1, 2, &allocs[2]);
  bool reallocated = false;
  arena.Commit(&reallocated);
  ASSERT_TRUE(reallocated);
  EXPECT_EQ(allocs[0].offset, 0);
  EXPECT_EQ(allocs[1].offset, 2048);
  EXPECT_EQ(allocs[2].offset, 4096);
  arena.ClearPlan();
  // Smaller plan fits in the existing buffer: no reallocation.
  arena.Allocate(&context, 32, 1023, 3, 0, 2, &allocs[3]);
  arena.Allocate(&context, 32, 1023, 4, 1, 2, &allocs[4]);
  arena.Allocate(&context, 32, 1023, 5, 1, 2, &allocs[5]);
  arena.Commit(&reallocated);
  ASSERT_FALSE(reallocated);
  EXPECT_EQ(allocs[3].offset, 0);
  EXPECT_EQ(allocs[4].offset, 1024);
  EXPECT_EQ(allocs[5].offset, 2048);
  arena.ClearPlan();
  // Larger plan exceeds the buffer: reallocation required.
  arena.Allocate(&context, 32, 4095, 6, 0, 2, &allocs[6]);
  arena.Allocate(&context, 32, 4095, 7, 1, 2, &allocs[7]);
  arena.Allocate(&context, 32, 4095, 8, 1, 2, &allocs[8]);
  arena.Commit(&reallocated);
  ASSERT_TRUE(reallocated);
  EXPECT_EQ(allocs[6].offset, 0);
  EXPECT_EQ(allocs[7].offset, 4096);
  EXPECT_EQ(allocs[8].offset, 8192);
}
// PurgeActiveAllocs(node) drops all allocations whose lifetime ended before
// `node`, freeing their space for subsequent requests.
TEST(SimpleMemoryArenaTest, TestPurgeAllocs) {
  TfLiteContext context;
  context.ReportError = ReportError;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[5];
  arena.Allocate(&context, 32, 2047, 0,
                 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1,
                 1, 2, &allocs[1]);
  arena.Allocate(&context, 32, 2047, 2,
                 2, 3, &allocs[2]);
  bool reallocated = false;
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  char* resolved_ptr0 = nullptr;
  char* resolved_ptr1 = nullptr;
  char* resolved_ptr2 = nullptr;
  char* resolved_ptr3 = nullptr;
  // The three live allocations resolve to consecutive 2048-byte slots.
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
  EXPECT_NE(resolved_ptr0, nullptr);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
  EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
  EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
  // All three lifetimes end before node 4, so purging frees everything.
  arena.PurgeActiveAllocs(4);
  arena.Allocate(&context, 32, 13, 3,
                 4, 5, &allocs[4]);
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[4], &resolved_ptr3), kTfLiteOk);
  // With the arena purged, the new allocation starts back at offset 0.
  ASSERT_EQ(allocs[4].offset, 0);
  arena.Allocate(&context, 32, 2047, 0,
                 0, 2, &allocs[0]);
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  // NOTE(review): allocs[3] was never populated by Allocate (index 4 was
  // used above) — resolving a default-constructed alloc here looks like it
  // should reference allocs[0]; confirm intent.
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
  ASSERT_EQ(allocs[0].offset, 0);
}
// ResetAllocs() clears every active allocation (regardless of lifetime);
// the next plan starts from an empty arena.
TEST(SimpleMemoryArenaTest, TestResetAllocs) {
  TfLiteContext context;
  context.ReportError = ReportError;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[9];
  arena.Allocate(&context, 32, 2047, 0,
                 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1,
                 1, 2, &allocs[1]);
  arena.Allocate(&context, 32, 2047, 2,
                 2, 3, &allocs[2]);
  bool reallocated = false;
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  char* resolved_ptr0 = nullptr;
  char* resolved_ptr1 = nullptr;
  char* resolved_ptr2 = nullptr;
  char* resolved_ptr3 = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
  EXPECT_NE(resolved_ptr0, nullptr);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
  EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
  EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
  // A fourth allocation overlapping all lifetimes forces the buffer to grow;
  // previously resolved pointers must remain internally consistent.
  arena.Allocate(&context, 32, 13, 0,
                 0, 3, &allocs[3]);
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr0), kTfLiteOk);
  EXPECT_NE(resolved_ptr0, nullptr);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr1), kTfLiteOk);
  EXPECT_EQ(resolved_ptr1, resolved_ptr0 + 2048);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[2], &resolved_ptr2), kTfLiteOk);
  EXPECT_EQ(resolved_ptr2, resolved_ptr1 + 2048);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
  EXPECT_EQ(resolved_ptr3, resolved_ptr2 + 2048);
  // After a full reset the next allocation starts back at offset 0.
  arena.ResetAllocs();
  arena.Allocate(&context, 32, 13, 0,
                 0, 2, &allocs[3]);
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[3], &resolved_ptr3), kTfLiteOk);
  ASSERT_EQ(allocs[3].offset, 0);
}
// ReleaseBuffer() frees the backing storage but keeps the plan: resolving
// fails until the next Commit re-materializes the buffer.
TEST(SimpleMemoryArenaTest, TestClearBuffer) {
  TfLiteContext context;
  context.ReportError = ReportError;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[9];
  arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
  // Releasing before the first Commit is a harmless no-op.
  ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
  bool reallocated = false;
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  char* resolved_ptr = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
  EXPECT_NE(resolved_ptr, nullptr);
  resolved_ptr = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
  EXPECT_NE(resolved_ptr, nullptr);
  // After release the base pointer is null and resolving must fail.
  ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
  ASSERT_EQ(arena.BasePointer(), 0);
  ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
  // Commit restores the buffer (plan was kept), so resolving works again.
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  ASSERT_NE(arena.BasePointer(), 0);
  resolved_ptr = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
  EXPECT_NE(resolved_ptr, nullptr);
  resolved_ptr = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
  EXPECT_NE(resolved_ptr, nullptr);
}
// Parameterized fixture: the bool selects the order in which the buffer and
// the plan are cleared in the test below.
class BufferAndPlanClearingTest : public ::testing::Test,
                                  public ::testing::WithParamInterface<bool> {};
// Clearing both the buffer and the plan (in either order) leaves an empty
// arena: Commit is a no-op, resolving fails, and the arena is fully usable
// again after re-planning.
TEST_P(BufferAndPlanClearingTest, TestClearBufferAndClearPlan) {
  TfLiteContext context;
  context.ReportError = ReportError;
  SimpleMemoryArena arena(64);
  ArenaAllocWithUsageInterval allocs[9];
  arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
  bool reallocated = false;
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  // The parameter picks the clearing order; the end state must be the same.
  if (GetParam()) {
    ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
    ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
  } else {
    ASSERT_EQ(arena.ClearPlan(), kTfLiteOk);
    ASSERT_EQ(arena.ReleaseBuffer(), kTfLiteOk);
  }
  // With nothing planned, Commit has nothing to (re)allocate.
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  EXPECT_FALSE(reallocated);
  char* resolved_ptr = nullptr;
  ASSERT_NE(arena.ResolveAlloc(&context, allocs[0], &resolved_ptr), kTfLiteOk);
  // The arena remains usable for a fresh plan.
  arena.Allocate(&context, 32, 2047, 0, 0, 2, &allocs[0]);
  arena.Allocate(&context, 32, 2047, 1, 1, 2, &allocs[1]);
  ASSERT_EQ(arena.Commit(&reallocated), kTfLiteOk);
  ASSERT_TRUE(reallocated);
  resolved_ptr = nullptr;
  ASSERT_EQ(arena.ResolveAlloc(&context, allocs[1], &resolved_ptr), kTfLiteOk);
  EXPECT_NE(resolved_ptr, nullptr);
}
// Run the parameterized test with both clearing orders.
INSTANTIATE_TEST_SUITE_P(BufferAndPlanClearingTest, BufferAndPlanClearingTest,
                         ::testing::Values(true, false));
}
} |
43 | #ifndef XLA_STREAM_EXECUTOR_GPU_REDZONE_ALLOCATOR_H_
#define XLA_STREAM_EXECUTOR_GPU_REDZONE_ALLOCATOR_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
// A ScratchAllocator that surrounds every allocation with two "redzones"
// filled with a known byte pattern. After GPU work has run, CheckRedzones()
// detects whether any kernel wrote out of bounds of its buffer.
class RedzoneAllocator : public ScratchAllocator {
 public:
  static constexpr int64_t kDefaultRedzoneSize =
      1LL << 23;  // 8 MiB redzone on each side of an allocation.
  // -1 converts to 0xff — a pattern unlikely to be written by accident.
  static constexpr uint8_t kDefaultRedzonePattern = -1;
  RedzoneAllocator(Stream* stream, DeviceMemoryAllocator* memory_allocator,
                   const GpuAsmOpts& gpu_compilation_opts_,
                   int64_t memory_limit = (1LL << 32),
                   int64_t redzone_size = kDefaultRedzoneSize,
                   uint8_t redzone_pattern = kDefaultRedzonePattern);
  int64_t GetMemoryLimitInBytes() override { return memory_limit_; }
  // Total user-requested bytes; redzones and alignment slop are excluded.
  int64_t TotalAllocatedBytesExcludingRedzones() const {
    return allocated_bytes_excluding_redzones_;
  }
  // Returns the user-visible chunk; redzones are managed internally.
  // Spelled uint8_t (not the legacy `uint8` alias) for consistency with the
  // out-of-line definition.
  absl::StatusOr<DeviceMemory<uint8_t>> AllocateBytes(
      int64_t byte_size) override;
  // Result of a redzone scan. A default-constructed value (null
  // user_buffer_address) means "no violation found".
  struct RedzoneCheckStatus {
    RedzoneCheckStatus() = default;
    RedzoneCheckStatus(absl::string_view buffer_name, void* user_buffer_address,
                       int64_t offset, uint64_t expected_value,
                       uint64_t actual_value)
        : buffer_name(buffer_name),
          user_buffer_address(user_buffer_address),
          offset(offset),
          expected_value(expected_value),
          actual_value(actual_value) {}
    static RedzoneCheckStatus OK() { return {}; }
    // const-qualified so it can be queried through const refs/values.
    bool ok() const { return user_buffer_address == nullptr; }
    std::string RedzoneFailureMsg() const;
    std::string buffer_name = {};
    void* user_buffer_address = nullptr;
    int64_t offset = 0;
    uint64_t expected_value = 0;
    uint64_t actual_value = 0;
  };
  // Scans every redzone of every allocation made so far; returns the first
  // violation found (re-initializing damaged redzones), or OK().
  absl::StatusOr<RedzoneCheckStatus> CheckRedzones() const;
  Stream* stream() const { return stream_; }

 private:
  const int device_ordinal_;
  Stream* stream_;
  const int64_t memory_limit_;
  const int64_t redzone_size_;
  const uint8_t redzone_pattern_;
  DeviceMemoryAllocator* memory_allocator_;
  GpuAsmOpts gpu_compilation_opts_;
  // Pairs each owning buffer with the user-requested (unpadded) size.
  std::vector<std::pair<OwningDeviceMemory, int64_t>> allocated_buffers_;
  int64_t allocated_bytes_excluding_redzones_ = 0;
};
}
#endif
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/redzone_allocator_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/lib/math/math_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
// Rounds `value` up to the nearest multiple of `divisor`.
template <typename T>
static T RoundUpToNearest(T value, T divisor) {
  const T ceil_ratio = tsl::MathUtil::CeilOfRatio(value, divisor);
  return ceil_ratio * divisor;
}
// The right-hand redzone must begin on a multiple of this many bytes; the
// gap between the user chunk and that boundary is pattern-filled "slop".
constexpr int64_t kRhsRedzoneAlign = 4;
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
// The redzone size is rounded up to the framework allocator alignment so
// that the user chunk (which starts right after the LHS redzone) is
// suitably aligned.
RedzoneAllocator::RedzoneAllocator(Stream* stream,
                                   DeviceMemoryAllocator* memory_allocator,
                                   const GpuAsmOpts& gpu_compilation_opts,
                                   int64_t memory_limit, int64_t redzone_size,
                                   uint8_t redzone_pattern)
    : device_ordinal_(stream->parent()->device_ordinal()),
      stream_(stream),
      memory_limit_(memory_limit),
      redzone_size_(RoundUpToNearest(
          redzone_size,
          static_cast<int64_t>(tsl::Allocator::kAllocatorAlignment))),
      redzone_pattern_(redzone_pattern),
      memory_allocator_(memory_allocator),
      gpu_compilation_opts_(gpu_compilation_opts) {}
// Allocates `byte_size` user bytes laid out as
//   [lhs redzone | user chunk | slop | rhs redzone]
// and fills both redzones (and the slop) with the redzone pattern.
// Returns the user chunk only; the whole buffer is retained for
// CheckRedzones().
absl::StatusOr<DeviceMemory<uint8_t>> RedzoneAllocator::AllocateBytes(
    int64_t byte_size) {
  // Fixed message: CHECK_GE(x, 0) enforces non-negative, not positive.
  CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
  if (byte_size > GetMemoryLimitInBytes()) {
    return absl::ResourceExhaustedError(absl::StrFormat(
        "Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
        GetMemoryLimitInBytes()));
  }
  // Pad the user chunk so the RHS redzone starts on a kRhsRedzoneAlign
  // boundary.
  int64_t rhs_slop = RoundUpToNearest(byte_size, kRhsRedzoneAlign) - byte_size;
  TF_ASSIGN_OR_RETURN(
      OwningDeviceMemory allocated_buffer,
      memory_allocator_->Allocate(device_ordinal_,
                                  byte_size + 2 * redzone_size_ + rhs_slop,
                                  false));
  allocated_bytes_excluding_redzones_ += byte_size;
  static_assert(sizeof(uint8_t) == 1, "Unexpected size");
  DeviceMemory<uint8_t> allocated_buffer_memory(*allocated_buffer);
  DeviceMemory<uint8_t> lhs_redzone =
      allocated_buffer_memory.GetSlice(0, redzone_size_);
  DeviceMemory<uint8_t> data_chunk =
      allocated_buffer_memory.GetSlice(redzone_size_, byte_size);
  DeviceMemory<uint8_t> rhs_redzone_slop =
      allocated_buffer_memory.GetSlice(redzone_size_ + byte_size, rhs_slop);
  DeviceMemory<uint8_t> rhs_redzone_nonslop = allocated_buffer_memory.GetSlice(
      redzone_size_ + byte_size + rhs_slop, redzone_size_);
  // Replicate the one-byte pattern into a 32-bit word for Memset32.
  uint8_t pattern_arr[] = {redzone_pattern_, redzone_pattern_, redzone_pattern_,
                           redzone_pattern_};
  uint32_t pattern32;
  std::memcpy(&pattern32, pattern_arr, sizeof(pattern32));
  TF_RETURN_IF_ERROR(stream_->Memset32(&lhs_redzone, pattern32, redzone_size_));
  if (rhs_slop != 0) {
    // rhs_slop < 4, so copying a prefix of the pattern word suffices.
    TF_RETURN_IF_ERROR(
        stream_->Memcpy(&rhs_redzone_slop, &pattern32, rhs_slop));
  }
  TF_RETURN_IF_ERROR(
      stream_->Memset32(&rhs_redzone_nonslop, pattern32, redzone_size_));
  allocated_buffers_.emplace_back(std::move(allocated_buffer), byte_size);
  return data_chunk;
}
// Copies `redzone` to the host and reports the first location that differs
// from `redzone_pattern`, or OK() if the redzone is intact. Compares eight
// bytes at a time, then finishes byte-by-byte.
static absl::StatusOr<RedzoneCheckStatus> CheckRedzoneHost(
    DeviceMemoryBase redzone, DeviceMemoryBase user_allocation,
    absl::string_view name, Stream* stream, uint8_t redzone_pattern) {
  uint64_t size = redzone.size();
  auto redzone_data = std::make_unique<uint8_t[]>(size);
  TF_RETURN_IF_ERROR(stream->Memcpy(redzone_data.get(), redzone, size));
  TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
  // Build a 64-bit word consisting of eight pattern bytes.
  std::array<uint8_t, sizeof(uint64_t)> pattern_arr;
  pattern_arr.fill(redzone_pattern);
  uint64_t pattern64;
  std::memcpy(&pattern64, pattern_arr.data(), sizeof(uint64_t));
  int64_t i;
  for (i = 0; i + 7 < size; i += sizeof(uint64_t)) {
    // memcpy instead of a reinterpret_cast dereference: avoids potential
    // misaligned loads and strict-aliasing UB; compiles to the same load.
    uint64_t rz_value;
    std::memcpy(&rz_value, &redzone_data[i], sizeof(rz_value));
    if (rz_value != pattern64) {
      return RedzoneCheckStatus(name, user_allocation.opaque(), i, pattern64,
                                rz_value);
    }
  }
  // Tail: fewer than eight bytes remain.
  for (; i < size; ++i) {
    uint8_t rz_value = redzone_data[i];
    if (rz_value != redzone_pattern) {
      return RedzoneCheckStatus(name, user_allocation.opaque(), i,
                                redzone_pattern, rz_value);
    }
  }
  return RedzoneCheckStatus::OK();
}
// Launches `comparison_kernel` over `redzone`, accumulating the number of
// pattern mismatches into `out_param`. A no-op for empty redzones.
static absl::Status RunRedzoneChecker(
    Stream* stream, const DeviceMemory<uint8_t>& redzone,
    uint8_t redzone_pattern, const DeviceMemory<uint64_t>& out_param,
    const ComparisonKernel& comparison_kernel) {
  StreamExecutor* executor = stream->parent();
  if (redzone.size() == 0) {
    return absl::OkStatus();
  }
  const int64_t element_count = redzone.size();
  const int64_t max_threads =
      executor->GetDeviceDescription().threads_per_block_limit();
  // One thread per byte, capped by the device's block-size limit.
  const int64_t threads = std::min(max_threads, element_count);
  const int64_t blocks = tsl::MathUtil::CeilOfRatio(element_count, threads);
  TF_RETURN_IF_ERROR(stream->ThenLaunch(ThreadDim(threads), BlockDim(blocks),
                                        comparison_kernel, redzone,
                                        redzone_pattern, redzone.size(),
                                        out_param));
  return absl::OkStatus();
}
// Restores `redzone` to the pristine pattern after a detected violation so
// later checks start from a clean slate.
static absl::Status ReinitializeRedzone(Stream* stream,
                                        DeviceMemoryBase redzone,
                                        uint8_t redzone_pattern) {
  // Host-side staging buffer pre-filled with the pattern.
  absl::FixedArray<uint8_t> pattern_bytes(redzone.size(), redzone_pattern);
  TF_RETURN_IF_ERROR(
      stream->Memcpy(&redzone, pattern_bytes.data(), redzone.size()));
  // Keep the staging buffer alive until the copy completes.
  return stream->BlockHostUntilDone();
}
// Checks both redzones of one allocation on-device (fast path); only if the
// device kernel reports a mismatch does it rescan on the host to pinpoint
// the failure, then re-initializes the damaged redzones.
static absl::StatusOr<RedzoneCheckStatus> CheckRedzonesForBuffer(
    Stream* stream, DeviceMemoryBase memory,
    const DeviceMemory<uint64_t>& out_param,
    const ComparisonKernel& comparison_kernel, int64_t user_allocation_size,
    uint64_t redzone_size, uint8_t redzone_pattern) {
  // Recompute the slop exactly as AllocateBytes did.
  int64_t rhs_slop =
      RoundUpToNearest<int64_t>(user_allocation_size, kRhsRedzoneAlign) -
      user_allocation_size;
  CHECK_EQ(memory.size(), user_allocation_size + rhs_slop + 2 * redzone_size);
  // Layout: [lhs redzone | user chunk | slop | rhs redzone]; the slop is
  // checked as part of the RHS redzone.
  DeviceMemory<uint8_t> buffer_uint8(memory);
  DeviceMemory<uint8_t> lhs_redzone =
      buffer_uint8.GetSlice(0,
                            redzone_size);
  DeviceMemory<uint8_t> user_allocation =
      buffer_uint8.GetSlice(redzone_size,
                            user_allocation_size);
  DeviceMemory<uint8_t> rhs_redzone =
      buffer_uint8.GetSlice(redzone_size + user_allocation_size,
                            redzone_size + rhs_slop);
  TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, lhs_redzone, redzone_pattern,
                                       out_param, comparison_kernel));
  TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, rhs_redzone, redzone_pattern,
                                       out_param, comparison_kernel));
  int64_t result;
  CHECK_EQ(out_param.size(), sizeof(result));
  TF_RETURN_IF_ERROR(stream->Memcpy(&result, out_param, sizeof(result)));
  TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
  if (result != 0) {
    // Device kernel saw a mismatch; locate it precisely on the host.
    TF_ASSIGN_OR_RETURN(RedzoneCheckStatus lhs_check,
                        CheckRedzoneHost(lhs_redzone, user_allocation, "LHS",
                                         stream, redzone_pattern));
    TF_ASSIGN_OR_RETURN(RedzoneCheckStatus rhs_check,
                        CheckRedzoneHost(rhs_redzone, user_allocation, "RHS",
                                         stream, redzone_pattern));
    // Host and device scans must agree that something is wrong.
    CHECK(!lhs_check.ok() || !rhs_check.ok())
        << "Mismatched results with host and device comparison";
    // Repair the redzones so subsequent checks are meaningful.
    TF_RETURN_IF_ERROR(
        ReinitializeRedzone(stream, lhs_redzone, redzone_pattern));
    TF_RETURN_IF_ERROR(
        ReinitializeRedzone(stream, rhs_redzone, redzone_pattern));
    return !lhs_check.ok() ? lhs_check : rhs_check;
  }
  return RedzoneCheckStatus::OK();
}
// Checks the redzones of all buffers allocated so far; returns on the
// first violation found, otherwise OK().
absl::StatusOr<RedzoneCheckStatus> RedzoneAllocator::CheckRedzones() const {
  StreamExecutor* executor = stream_->parent();
  TF_ASSIGN_OR_RETURN(
      const ComparisonKernel* kernel,
      GetComparisonKernel(stream_->parent(), gpu_compilation_opts_));
  // Device-side mismatch counter, zeroed before the scans; freed on scope
  // exit by DeviceMemoryHandle.
  stream_executor::DeviceMemoryHandle out_param(
      executor, executor->AllocateScalar<uint64_t>());
  TF_RETURN_IF_ERROR(
      stream_->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
  for (const auto& buf_and_size : allocated_buffers_) {
    TF_ASSIGN_OR_RETURN(
        RedzoneCheckStatus redzone_status,
        CheckRedzonesForBuffer(stream_, *buf_and_size.first,
                               DeviceMemory<uint64_t>(out_param.memory()),
                               *kernel, buf_and_size.second, redzone_size_,
                               redzone_pattern_));
    if (!redzone_status.ok()) {
      return redzone_status;
    }
  }
  return RedzoneCheckStatus::OK();
}
// Human-readable description of a redzone violation for error reporting.
std::string RedzoneCheckStatus::RedzoneFailureMsg() const {
  return absl::StrFormat(
      "Redzone mismatch in %s redzone of buffer %p at offset %d; "
      "expected %08x but was %08x.",
      buffer_name, user_buffer_address, offset, expected_value, actual_value);
}
} | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
// Asserts that the check itself succeeded AND found no violation.
static void EXPECT_REDZONE_OK(absl::StatusOr<RedzoneCheckStatus> status) {
  EXPECT_TRUE(status.ok());
  EXPECT_TRUE(status.value().ok());
}
// Asserts that the check itself succeeded but DID find a violation.
static void EXPECT_REDZONE_VIOLATION(
    absl::StatusOr<RedzoneCheckStatus> status) {
  EXPECT_TRUE(status.ok());
  EXPECT_FALSE(status.value().ok());
}
// End-to-end: allocate with redzones, verify both zones contain the
// pattern, then corrupt single bytes at the zone edges and confirm the
// checker reports a violation and repairs the zone.
TEST(RedzoneAllocatorTest, WriteToRedzone) {
  constexpr int64_t kRedzoneSize = 1 << 23;
  constexpr uint8_t kRedzonePattern = 0x7e;
  // Deliberately not a multiple of 4 so the RHS slop path is exercised.
  constexpr int64_t kAllocSize = (1 << 25) + 1;
  Platform* platform =
      PlatformManager::PlatformWithName(GpuPlatformName()).value();
  StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
  GpuAsmOpts opts;
  StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
  TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
  RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
                             (1LL << 32),
                             kRedzoneSize,
                             kRedzonePattern);
  TF_ASSERT_OK_AND_ASSIGN(DeviceMemory<uint8_t> buf,
                          allocator.AllocateBytes(kAllocSize));
  EXPECT_REDZONE_OK(allocator.CheckRedzones());
  // The redzones sit immediately before and after the returned chunk.
  char* buf_addr = reinterpret_cast<char*>(buf.opaque());
  DeviceMemoryBase lhs_redzone(buf_addr - kRedzoneSize, kRedzoneSize);
  DeviceMemoryBase rhs_redzone(buf_addr + kAllocSize, kRedzoneSize);
  // Copies a redzone to the host and verifies every byte is the pattern,
  // bailing out after a bounded number of mismatch reports.
  auto check_redzone = [&](DeviceMemoryBase redzone, absl::string_view name) {
    std::vector<uint8_t> host_buf(kRedzoneSize);
    TF_ASSERT_OK(stream->Memcpy(host_buf.data(), redzone, kRedzoneSize));
    TF_ASSERT_OK(stream->BlockHostUntilDone());
    const int64_t kMaxMismatches = 16;
    int64_t mismatches = 0;
    for (int64_t i = 0; i < host_buf.size(); ++i) {
      if (mismatches == kMaxMismatches) {
        ADD_FAILURE() << "Hit max number of mismatches; skipping others.";
        break;
      }
      if (host_buf[i] != kRedzonePattern) {
        ++mismatches;
        EXPECT_EQ(host_buf[i], kRedzonePattern)
            << "at index " << i << " of " << name << " redzone";
      }
    }
  };
  check_redzone(lhs_redzone, "lhs");
  check_redzone(rhs_redzone, "rhs");
  // Zeroes one byte of a redzone: CheckRedzones must flag it once, then
  // (because the checker repairs the zone) pass on the next call.
  auto modify_redzone = [&](DeviceMemoryBase redzone, int64_t offset,
                            absl::string_view name) {
    SCOPED_TRACE(absl::StrCat(name, ", offset=", offset));
    DeviceMemoryBase redzone_at_offset(
        reinterpret_cast<char*>(redzone.opaque()) + offset, 1);
    char old_redzone_value = 0;
    { EXPECT_REDZONE_OK(allocator.CheckRedzones()); }
    TF_ASSERT_OK(stream->Memcpy(&old_redzone_value, redzone_at_offset, 1));
    TF_ASSERT_OK(stream->MemZero(&redzone_at_offset, 1));
    EXPECT_REDZONE_VIOLATION(allocator.CheckRedzones());
    EXPECT_REDZONE_OK(allocator.CheckRedzones());
  };
  // Probe the first and last byte of each redzone.
  modify_redzone(lhs_redzone, 0, "lhs");
  modify_redzone(lhs_redzone, kRedzoneSize - 1, "lhs");
  modify_redzone(rhs_redzone, 0, "rhs");
  modify_redzone(rhs_redzone, kRedzoneSize - 1, "rhs");
}
// Regression test: a redzone larger than 64 MiB - 1 KiB must not break the
// checker (e.g. via kernel launch-dimension limits).
TEST(RedzoneAllocatorTest, VeryLargeRedzone) {
  constexpr int64_t kRedzoneSize = 65535 * 1024 + 1;
  Platform* platform =
      PlatformManager::PlatformWithName(GpuPlatformName()).value();
  StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
  GpuAsmOpts opts;
  StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
  TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
  RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
                             (1LL << 32),
                             kRedzoneSize,
                             -1);
  // The allocation result itself is irrelevant; only the check matters.
  (void)allocator.AllocateBytes(1);
  EXPECT_REDZONE_OK(allocator.CheckRedzones());
}
}
} |
44 | #ifndef TENSORFLOW_CORE_GRAPH_OPTIMIZER_CSE_H_
#define TENSORFLOW_CORE_GRAPH_OPTIMIZER_CSE_H_
#include <sys/types.h>
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
// Performs common-subexpression elimination on `g`, merging equivalent
// nodes. Returns true iff the graph was changed. If `consider_fn` is
// non-null, only nodes for which it returns true are candidates.
extern bool OptimizeCSE(Graph* g,
                        const std::function<bool(const Node*)>& consider_fn);
}
#endif
#include "tensorflow/core/graph/optimizer_cse.h"
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
// Implements CSE over a Graph: hashes each node's signature (op, types,
// inputs, attrs) and merges nodes whose signatures and inputs match.
class OptimizerCSE {
 public:
  explicit OptimizerCSE(Graph* g) : g_(g) {}
  // Runs the pass; returns true iff any node was merged away.
  bool Optimize(const std::function<bool(const Node*)>& consider_fn);

 private:
  // Hash of a node's CSE-relevant signature; collisions are resolved by
  // Equivalent().
  static size_t NodeHash(const Node* n);
  // True iff `a` and `b` compute the same value and may be merged.
  static bool Equivalent(const Node* a, const Node* b,
                         AttrSlice::Scratch* scratch);
  Graph* g_;
};
// Collects `n`'s inputs: control edges into `control_edges` and data edges
// into `in`, indexed by destination input slot. Both lists are put into a
// canonical order so equivalent nodes hash and compare identically.
static void FillInputs(
    const Node* n, absl::InlinedVector<const Node*, 4UL>* control_edges,
    absl::InlinedVector<std::pair<const Node*, int>, 4UL>* in) {
  DCHECK_EQ(in->size(), n->num_inputs());
  control_edges->clear();
  for (const Edge* edge : n->in_edges()) {
    if (edge->IsControlEdge()) {
      control_edges->push_back(edge->src());
      continue;
    }
    (*in)[edge->dst_input()] =
        std::make_pair(edge->src(), edge->src_output());
  }
  std::sort(control_edges->begin(), control_edges->end());
  if (n->op_def().is_commutative()) {
    // Inputs of commutative ops may match in any order; sort them too.
    std::sort(in->begin(), in->end());
  }
}
// Sentinel hash value a node must never report (see Hasher::hash()).
static size_t kIllegalNodeHash = 0;
// Incrementally hashes strings, integers, and protobuf messages into a
// single 64-bit value. Protos are serialized deterministically straight
// into a hashing stream, so no intermediate serialization buffer is kept.
class Hasher {
 public:
  // Never returns kIllegalNodeHash, so callers can use that value as a
  // sentinel.
  uint64 hash() { return h_ == kIllegalNodeHash ? kIllegalNodeHash + 1 : h_; }
  void MixString(const string& s) { h_ = Hash64(s.data(), s.size(), h_); }
  void MixInteger(size_t z) { h_ = Hash64Combine(h_, z); }
  void MixProto(const protobuf::MessageLite& msg) {
    // Cache sub-message sizes so SerializeWithCachedSizes is valid.
    msg.ByteSizeLong();
    HashingOutputStream hasher;
    {
      // Scoped so the CodedOutputStream flushes into `hasher` before we
      // read its hash.
      protobuf::io::CodedOutputStream stream(&hasher);
      stream.EnableAliasing(true);
      // Deterministic serialization: required for a stable hash.
      stream.SetSerializationDeterministic(true);
      msg.SerializeWithCachedSizes(&stream);
    }
    h_ = Hash64Combine(h_, hasher.hash());
  }

 private:
  // A ZeroCopyOutputStream that hashes whatever is written to it instead of
  // storing it. Data is accumulated in a small buffer and mixed into the
  // running hash kBufSize bytes at a time.
  class HashingOutputStream : public protobuf::io::ZeroCopyOutputStream {
   public:
    // 228 bytes fits several serialized attr values without re-mixing.
    static constexpr size_t kBufSize = 228;
    static constexpr uint64 kDefaultSeed = 2570847921467975139ULL;
    bool Next(void** data, int* size) override {
      if (i_ == kBufSize) {
        // Buffer full: fold it into the hash and hand the whole buffer back.
        Mix(buf_, kBufSize);
        *data = buf_;
        *size = kBufSize;
      } else {
        // Hand back the unused tail of the buffer.
        *data = buf_ + i_;
        *size = kBufSize - i_;
      }
      i_ = kBufSize;
      return true;
    }
    void BackUp(int count) override { i_ -= count; }
    int64_t ByteCount() const override { return byte_count_; }
    bool WriteAliasedRaw(const void* void_data, int size) override {
      const char* data = static_cast<const char*>(void_data);
      const auto remaining = kBufSize - i_;
      if (remaining > 0) {
        if (size < remaining) {
          // Fits entirely in the buffer's free space.
          memcpy(buf_ + i_, data, size);
          i_ += size;
          return true;
        }
        // Top off the buffer, then fall through to flush it.
        memcpy(buf_ + i_, data, remaining);
        i_ = kBufSize;
        data += remaining;
        size -= remaining;
      }
      if (i_ == kBufSize) {
        Mix(buf_, kBufSize);
        i_ = 0;
      }
      // Mix full buffer-sized chunks directly from the source.
      while (size >= kBufSize) {
        Mix(data, kBufSize);
        data += kBufSize;
        size -= kBufSize;
      }
      // Stash the remainder for later.
      memcpy(buf_, data, size);
      i_ = size;
      return true;
    }
    bool AllowsAliasing() const override { return true; }
    // Flushes any buffered bytes, then returns the accumulated hash.
    uint64 hash() {
      if (i_ != 0) {
        Mix(buf_, i_);
        i_ = 0;
      }
      return h_;
    }

   private:
    void Mix(const char* p, size_t n) {
      byte_count_ += n;
      h_ = Hash64(p, n, h_);
    }
    char buf_[kBufSize];
    int i_ = 0;  // Number of pending (un-mixed) bytes in buf_.
    int64_t byte_count_ = 0;
    uint64 h_ = kDefaultSeed;
  };
  uint64 h_ = HashingOutputStream::kDefaultSeed;
};
// Hashes everything that determines a node's CSE identity: op type, output
// types, inputs (ids and ports, canonically ordered), and attrs.
size_t OptimizerCSE::NodeHash(const Node* n) {
  Hasher hasher;
  hasher.MixString(n->type_string());
  hasher.MixInteger(n->output_types().size());
  for (DataType dt : n->output_types()) {
    hasher.MixInteger(dt);
  }
  hasher.MixInteger(n->num_inputs());
  absl::InlinedVector<const Node*, 4UL> control_edges;
  absl::InlinedVector<std::pair<const Node*, int>, 4UL> in(n->num_inputs());
  FillInputs(n, &control_edges, &in);
  for (const auto& edge : in) {
    hasher.MixInteger(edge.first->id());
    hasher.MixInteger(edge.second);
  }
#if !defined(__ANDROID__)
  // Attrs are hashed unordered (combined commutatively) since attr
  // iteration order is not canonical. Skipped on Android builds.
  size_t attr_hashes = 0;
  for (const auto& attr : n->attrs()) {
    Hasher h;
    h.MixString(attr.first);
    h.MixProto(attr.second);
    attr_hashes = Hash64CombineUnordered(attr_hashes, h.hash());
  }
  hasher.MixInteger(attr_hashes);
#endif
  return hasher.hash();
}
static bool HasRefInput(const Node* n) {
for (auto dt : n->input_types()) {
if (IsRefType(dt)) return true;
}
return false;
}
// Exact equivalence check used to resolve hash matches: same op, not
// stateful, no ref inputs, identical attrs, and identical (canonically
// ordered) data and control inputs.
bool OptimizerCSE::Equivalent(const Node* a, const Node* b,
                              AttrSlice::Scratch* scratch) {
  if (a->type_string() != b->type_string()) return false;
  // Stateful ops (e.g. variables) may not be de-duplicated.
  if (a->op_def().is_stateful()) return false;
  if (HasRefInput(a) || HasRefInput(b)) return false;
  if (!a->attrs().EqualAttrs(b->attrs(), scratch)) return false;
  if (a->num_inputs() != b->num_inputs()) return false;
  const int N_in = a->num_inputs();
  absl::InlinedVector<const Node*, 4UL> a_control_edges;
  absl::InlinedVector<const Node*, 4UL> b_control_edges;
  absl::InlinedVector<std::pair<const Node*, int>, 4UL> a_in(N_in);
  absl::InlinedVector<std::pair<const Node*, int>, 4UL> b_in(N_in);
  FillInputs(a, &a_control_edges, &a_in);
  FillInputs(b, &b_control_edges, &b_in);
  if (a_in != b_in) return false;
  if (a_control_edges != b_control_edges) return false;
  return true;
}
// Walks the graph in reverse post-order (inputs before consumers, so merged
// inputs are already canonical) and merges each node into the first
// equivalent node seen with the same hash.
bool OptimizerCSE::Optimize(
    const std::function<bool(const Node*)>& consider_fn) {
  std::vector<Node*> order;
  GetReversePostOrder(*g_, &order, NodeComparatorID());
  // Maps node-signature hash -> first node seen with that hash. On a hash
  // collision with a non-equivalent node, the later node is simply skipped.
  std::unordered_map<size_t, Node*> available;
  bool changed = false;
  AttrSlice::Scratch scratch;
  for (Node* n : order) {
    if (!n->IsOp()) continue;
    // Placeholders are feed points; merging them would break feeds.
    if (n->type_string() == "Placeholder" ||
        n->type_string() == "PlaceholderV2" ||
        n->type_string() == "PlaceholderWithDefault") {
      continue;
    }
    if (consider_fn != nullptr && !consider_fn(n)) continue;
    size_t h = NodeHash(n);
    Node** candidate = &available[h];
    if (*candidate == nullptr) {
      *candidate = n;
    } else if (Equivalent(*candidate, n, &scratch)) {
      VLOG(1) << "CSE: equivalent: " << (*candidate)->name() << " and "
              << n->name();
      // Redirect all of n's consumers to the surviving candidate, then
      // drop n (RemoveNode also removes n's remaining edges).
      for (const Edge* e : n->out_edges()) {
        g_->AddEdge(*candidate, e->src_output(), e->dst(), e->dst_input());
      }
      MergeDebugInfo(NodeDebugInfo(*n), *candidate);
      g_->RemoveNode(n);
      changed = true;
    }
  }
  return changed;
}
// Public entry point: runs one CSE pass over `g`; returns true iff the
// graph changed.
bool OptimizeCSE(Graph* g,
                 const std::function<bool(const Node*)>& consider_fn) {
  OptimizerCSE optimizer(g);
  return optimizer.Optimize(consider_fn);
}
} | #include "tensorflow/core/graph/optimizer_cse.h"
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
// Fixture: builds a graph from textual GraphDef, runs CSE, and compares
// canonical string renderings of the graph before/after.
class OptimizerCSETest : public ::testing::Test {
 public:
  OptimizerCSETest() : graph_(OpRegistry::Global()) {}
  // Initializes graph_ from `s` and remembers its canonical form.
  void InitGraph(const string& s) {
    ::tensorflow::InitGraph(s, &graph_);
    original_ = CanonicalGraphString(&graph_);
  }
  // Only real ops participate in the canonical rendering (skips
  // source/sink).
  static bool IncludeNode(const Node* n) { return n->IsOp(); }
  // Renders one endpoint of an edge: "name", "name:control", or "name:i".
  static string EdgeId(const Node* n, int index) {
    if (index == 0) {
      return n->name();
    } else if (index == Graph::kControlSlot) {
      return strings::StrCat(n->name(), ":control");
    } else {
      return strings::StrCat(n->name(), ":", index);
    }
  }
  // Deterministic "nodes|edges" rendering (both lists sorted) so graphs can
  // be compared as strings.
  string CanonicalGraphString(Graph* g) {
    std::vector<string> nodes;
    std::vector<string> edges;
    for (const Node* n : g->nodes()) {
      if (IncludeNode(n)) {
        nodes.push_back(strings::StrCat(n->name(), "(", n->type_string(), ")"));
      }
    }
    for (const Edge* e : g->edges()) {
      if (IncludeNode(e->src()) && IncludeNode(e->dst())) {
        edges.push_back(strings::StrCat(EdgeId(e->src(), e->src_output()), "->",
                                        EdgeId(e->dst(), e->dst_input())));
      }
    }
    std::sort(nodes.begin(), nodes.end());
    std::sort(edges.begin(), edges.end());
    return strings::StrCat(absl::StrJoin(nodes, ";"), "|",
                           absl::StrJoin(edges, ";"));
  }
  // Runs CSE and returns the resulting canonical graph string.
  string DoCSE(const std::function<bool(const Node*)>& consider_fn = nullptr) {
    string before = CanonicalGraphString(&graph_);
    LOG(ERROR) << "Before rewrites: " << before;
    OptimizeCSE(&graph_, consider_fn);
    string result = CanonicalGraphString(&graph_);
    LOG(ERROR) << "After rewrites: " << result;
    return result;
  }
  const string& OriginalGraph() const { return original_; }
  Graph graph_;
  string original_;  // Canonical form captured at InitGraph time.
};
// Stateful so CSE never merges the test inputs themselves.
REGISTER_OP("Input").Output("o: float").SetIsStateful();
// Two identical Mul nodes with the same inputs collapse into one.
TEST_F(OptimizerCSETest, Simple) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul)|"
            "A->C;B->C:1");
}
// Three identical Mul nodes all collapse into the first one.
TEST_F(OptimizerCSETest, Simple_ThreeEquivalent) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul)|"
            "A->C;B->C:1");
}
// After C and D merge, E's inputs are rewired so both point at C.
TEST_F(OptimizerCSETest, Simple_WithFixups) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['C', 'D'] }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul);E(Mul)|"
            "A->C;B->C:1;C->E;C->E:1");
}
// Mul is commutative, so swapped input order still merges.
TEST_F(OptimizerCSETest, Simple_Commutative) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['B', 'A'] }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul)|"
            "A->C;B->C:1");
}
// Consider-function for OptimizeCSE: accepts every node except Mul, so Mul
// nodes are excluded from CSE in the Simple_Filtered test below.
static bool IsNotMultiply(const Node* n) {
  const bool is_mul = (n->type_string() == "Mul");
  return !is_mul;
}
// With the IsNotMultiply filter, the two equivalent Mul nodes are not
// considered, so the graph is unchanged.
TEST_F(OptimizerCSETest, Simple_Filtered) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['B', 'A'] }");
  EXPECT_EQ(DoCSE(IsNotMultiply), OriginalGraph());
}
// Sub is not commutative: [A, B] vs [B, A] must NOT be merged.
TEST_F(OptimizerCSETest, Simple_NotCommutative) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['B', 'A'] }");
  EXPECT_EQ(DoCSE(), OriginalGraph());
}
// Different op types (Mul vs Sub) on the same inputs are never merged.
TEST_F(OptimizerCSETest, NotEquivalent_Ops) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }");
  EXPECT_EQ(DoCSE(), OriginalGraph());
}
// Identical non-type attrs (a shape attr here) still count as equivalent.
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs1) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] attr { key: 'shape'"
      " value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] attr { key: 'shape'"
      " value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul)|"
            "A->C;B->C:1");
}
// Attr ordering in the proto must not matter: same attrs listed in a
// different order are still equivalent.
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs2) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 'a' value { i: 3 } }"
      " attr { key: 't' value { type: DT_INT32 } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 't' value { type: DT_INT32 } }"
      " attr { key: 'a' value { i: 3 } } }");
  EXPECT_EQ(DoCSE(),
            "A(Input);B(Input);C(Mul)|"
            "A->C;B->C:1");
}
// Two Const nodes with byte-identical tensor values are deduplicated; D's
// inputs are rewired to the surviving constant.
TEST_F(OptimizerCSETest, SameConstants) {
  InitGraph(
      "node { name: 'A' op: 'Const' "
      "  attr { key: 'dtype' value { type: DT_INT32 } }"
      "  attr { key: 'value' value {"
      "    tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
      "    int_val: 0 } } } }"
      "node { name: 'B' op: 'Const' "
      "  attr { key: 'dtype' value { type: DT_INT32 } }"
      "  attr { key: 'value' value {"
      "    tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
      "    int_val: 0 } } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
      " input: ['A', 'B'] }");
  EXPECT_EQ(DoCSE(),
            "A(Const);D(Mul)|"
            "A->D;A->D:1");
}
// Const nodes with different values (0 vs 100000) must both survive.
TEST_F(OptimizerCSETest, DifferentConstants) {
  InitGraph(
      "node { name: 'A' op: 'Const' "
      "  attr { key: 'dtype' value { type: DT_INT32 } }"
      "  attr { key: 'value' value {"
      "    tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
      "    int_val: 0 } } } }"
      "node { name: 'B' op: 'Const' "
      "  attr { key: 'dtype' value { type: DT_INT32 } }"
      "  attr { key: 'value' value {"
      "    tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
      "    int_val: 100000 } } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
      " input: ['A', 'B'] }");
  EXPECT_EQ(DoCSE(),
            "A(Const);B(Const);D(Mul)|"
            "A->D;B->D:1");
}
// Differing attr values ('a': 3 vs 4) block merging even if ops match.
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs1) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 'a' value { i: 3 } }"
      " attr { key: 't' value { type: DT_INT32 } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 't' value { type: DT_INT32 } }"
      " attr { key: 'a' value { i: 4 } } }");
  EXPECT_EQ(DoCSE(), OriginalGraph());
}
// Differing attr types ('t': DT_FLOAT vs DT_INT32) also block merging.
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs2) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 'a' value { i: 3 } }"
      " attr { key: 't' value { type: DT_FLOAT } } }"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B']"
      " attr { key: 't' value { type: DT_INT32 } }"
      " attr { key: 'a' value { i: 3 } } }");
  EXPECT_EQ(DoCSE(), OriginalGraph());
}
// Nodes reading different inputs (B vs C) are never equivalent.
TEST_F(OptimizerCSETest, NotEquivalent_Inputs) {
  InitGraph(
      "node { name: 'A' op: 'Input'}"
      "node { name: 'B' op: 'Input'}"
      "node { name: 'C' op: 'Input'}"
      "node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'B'] }"
      "node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
      " input: ['A', 'C'] }");
  EXPECT_EQ(DoCSE(), OriginalGraph());
}
// Builds eight constants (each value/type/shape appearing twice) and checks
// that CSE keeps exactly one node per equivalence class. Note b (double) and
// c (shape {1,1}) differ from a even though all hold the value 1.0.
TEST_F(OptimizerCSETest, Constant_Dedup) {
  Tensor a(DT_FLOAT, TensorShape({1}));
  a.flat<float>()(0) = 1.0;
  Tensor b(DT_DOUBLE, TensorShape({1}));
  b.flat<double>()(0) = 1.0;
  Tensor c(DT_FLOAT, TensorShape({1, 1}));
  c.flat<float>()(0) = 1.0;
  Tensor d(DT_FLOAT, TensorShape({1}));
  d.flat<float>()(0) = 2.0;
  Graph g(OpRegistry::Global());
  for (const auto& val : {a, b, c, d, d, c, b, a}) {
    test::graph::Constant(&g, val);
  }
  GraphDef gdef;
  test::graph::ToGraphDef(&g, &gdef);
  InitGraph(tsl::LegacyUnredactedDebugString(gdef));
  EXPECT_EQ(OriginalGraph(),
            "n/_0(Const);n/_1(Const);n/_2(Const);n/_3(Const);"
            "n/_4(Const);n/_5(Const);n/_6(Const);n/_7(Const)|");
  std::vector<string> nodes = str_util::Split(DoCSE(), ";|");
  std::set<string> node_set(nodes.begin(), nodes.end());
  // Which member of each pair survives is unspecified, so assert that exactly
  // one of the two duplicates remains.
  EXPECT_EQ(node_set.count("n/_0(Const)") + node_set.count("n/_7(Const)"), 1);
  EXPECT_EQ(node_set.count("n/_1(Const)") + node_set.count("n/_6(Const)"), 1);
  EXPECT_EQ(node_set.count("n/_2(Const)") + node_set.count("n/_5(Const)"), 1);
  EXPECT_EQ(node_set.count("n/_3(Const)") + node_set.count("n/_4(Const)"), 1);
}
// Benchmarks OptimizeCSE on a random graph of `op_nodes` Mul nodes wired to
// 10 Input nodes. Graph construction and teardown are excluded from timing
// via PauseTiming/ResumeTiming; only the OptimizeCSE call is measured.
void BM_CSE(::testing::benchmark::State& state) {
  const int op_nodes = state.range(0);
  string s;
  for (int in = 0; in < 10; in++) {
    s += strings::Printf("node { name: 'in%04d' op: 'Input'}", in);
  }
  // Fixed seeds keep the generated graph identical across runs.
  random::PhiloxRandom philox(301, 17);
  random::SimplePhilox rnd(&philox);
  for (int op = 0; op < op_nodes; op++) {
    s += strings::Printf(
        "node { name: 'op%04d' op: 'Mul' attr { key: 'T' value { "
        "type: DT_FLOAT } } input: ['in%04d', 'in%04d' ] }",
        op, rnd.Uniform(10), rnd.Uniform(10));
  }
  bool first = true;
  for (auto i : state) {
    state.PauseTiming();
    Graph* graph = new Graph(OpRegistry::Global());
    InitGraph(s, graph);
    int N = graph->num_node_ids();
    if (first) {
      // Label once so results are reported per graph node.
      state.SetLabel(strings::StrCat("Per graph node. Nodes: ", N));
      first = false;
    }
    {
      state.ResumeTiming();
      OptimizeCSE(graph, nullptr);
      state.PauseTiming();
    }
    delete graph;
    state.ResumeTiming();
  }
}
BENCHMARK(BM_CSE)->Arg(1000)->Arg(10000);
}
}
#ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_MEMORY_MANAGER_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_MEMORY_MANAGER_H_
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "common/memory.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
// Returns a MemoryManager backed by `arena`; see the .cc for null handling.
MemoryManager ProtoMemoryManager(google::protobuf::Arena* arena);
// Backwards-compatible alias that simply forwards to ProtoMemoryManager().
inline MemoryManager ProtoMemoryManagerRef(google::protobuf::Arena* arena) {
  return ProtoMemoryManager(arena);
}
// Extracts the underlying google::protobuf::Arena from `memory_manager`, or
// null when the manager is not arena-backed.
absl::Nullable<google::protobuf::Arena*> ProtoMemoryManagerArena(
    MemoryManager memory_manager);
// Constructs a `T` from `args` via Arena::Create, using the manager's arena
// when present (Arena::Create heap-allocates when given a null arena).
template <typename T, typename... Args>
ABSL_MUST_USE_RESULT T* NewInProtoArena(MemoryManager memory_manager,
                                        Args&&... args) {
  return google::protobuf::Arena::Create<T>(ProtoMemoryManagerArena(memory_manager),
                                  std::forward<Args>(args)...);
}
}
#endif
#include "extensions/protobuf/memory_manager.h"
#include "absl/base/nullability.h"
#include "common/memory.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace extensions {
// With an arena we return a pooling manager backed by it; with no arena we
// fall back to a reference-counting manager.
MemoryManagerRef ProtoMemoryManager(google::protobuf::Arena* arena) {
  if (arena == nullptr) {
    return MemoryManagerRef::ReferenceCounting();
  }
  return MemoryManagerRef::Pooling(arena);
}
// Returns the arena backing `memory_manager`; null for non-pooling
// (reference-counting) managers.
absl::Nullable<google::protobuf::Arena*> ProtoMemoryManagerArena(
    MemoryManager memory_manager) {
  return memory_manager.arena();
}
}
}
#include "extensions/protobuf/memory_manager.h"
#include "common/memory.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using testing::Eq;
using testing::IsNull;
using testing::NotNull;
// An arena-backed manager reports pooling memory management.
TEST(ProtoMemoryManager, MemoryManagement) {
  google::protobuf::Arena arena;
  auto memory_manager = ProtoMemoryManager(&arena);
  EXPECT_EQ(memory_manager.memory_management(), MemoryManagement::kPooling);
}
// The arena can be recovered from an arena-backed manager.
TEST(ProtoMemoryManager, Arena) {
  google::protobuf::Arena arena;
  auto memory_manager = ProtoMemoryManager(&arena);
  EXPECT_THAT(ProtoMemoryManagerArena(memory_manager), NotNull());
}
// ProtoMemoryManagerRef: pooling with an arena, reference counting without.
TEST(ProtoMemoryManagerRef, MemoryManagement) {
  google::protobuf::Arena arena;
  auto memory_manager = ProtoMemoryManagerRef(&arena);
  EXPECT_EQ(memory_manager.memory_management(), MemoryManagement::kPooling);
  memory_manager = ProtoMemoryManagerRef(nullptr);
  EXPECT_EQ(memory_manager.memory_management(),
            MemoryManagement::kReferenceCounting);
}
// ProtoMemoryManagerArena round-trips the exact arena, and is null for a
// reference-counting manager.
TEST(ProtoMemoryManagerRef, Arena) {
  google::protobuf::Arena arena;
  auto memory_manager = ProtoMemoryManagerRef(&arena);
  EXPECT_THAT(ProtoMemoryManagerArena(memory_manager), Eq(&arena));
  memory_manager = ProtoMemoryManagerRef(nullptr);
  EXPECT_THAT(ProtoMemoryManagerArena(memory_manager), IsNull());
}
}
}
#ifndef TENSORFLOW_CORE_KERNELS_DATA_FLAT_MAP_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_FLAT_MAP_DATASET_OP_H_
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
// Kernel for the "FlatMapDataset" op: maps a user-defined function `f` over
// each input element and flattens the resulting datasets into one stream.
class FlatMapDatasetOp : public UnaryDatasetOpKernel {
 public:
  // Op/attr name constants shared with the kernel registration and tests.
  static constexpr const char* const kDatasetType = "FlatMap";
  static constexpr const char* const kInputDataset = "input_dataset";
  static constexpr const char* const kOtherArguments = "other_arguments";
  static constexpr const char* const kFunc = "f";
  static constexpr const char* const kTarguments = "Targuments";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  explicit FlatMapDatasetOp(OpKernelConstruction* ctx);
 protected:
  // Wraps `input` in the FlatMap Dataset defined in the .cc file.
  void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                   DatasetBase** output) override;
 private:
  class Dataset;
  const int graph_def_version_;
  DataTypeVector output_types_;
  std::vector<PartialTensorShape> output_shapes_;
  // Metadata for the captured function `f`; populated in the constructor.
  std::shared_ptr<FunctionMetadata> func_metadata_ = nullptr;
};
}
}
#endif
#include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/flat_map_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Out-of-line definitions for the static constexpr members declared in the
// header (required for ODR-use prior to C++17 inline variables).
constexpr const char* const FlatMapDatasetOp::kDatasetType;
constexpr const char* const FlatMapDatasetOp::kInputDataset;
constexpr const char* const FlatMapDatasetOp::kOtherArguments;
constexpr const char* const FlatMapDatasetOp::kFunc;
constexpr const char* const FlatMapDatasetOp::kTarguments;
constexpr const char* const FlatMapDatasetOp::kOutputTypes;
constexpr const char* const FlatMapDatasetOp::kOutputShapes;
// Inputs with cardinality above this are rejected for global shuffling.
constexpr int64_t kMaxRandomIndexingCardinality = 100;
// Keys used when serializing iterator state to a checkpoint.
constexpr char kCycleLength[] = "cycle_length";
constexpr char kElementIndex[] = "element_index";
constexpr char kInputsSize[] = "inputs_size";
constexpr char kInputs[] = "inputs";
constexpr char kCurrentElementIteratorUninitialized[] =
    "current_element_iterator_uninitialized";
constexpr char kExhausted[] = "exhausted";
// Dataset produced by FlatMapDatasetOp. Owns the input dataset and the
// captured function, and supports global shuffling (random access) for small
// inputs via `random_access_handler_`.
//
// Fix: two identifiers in RestoreInternal and
// BuildCurrentElementIteratorLocked were corrupted by an HTML-entity
// mangling of "&curr" into "¤" ("¤t_..."); restored to
// `&current_element_iterator_uninitialized` and `&current_element_iterator_`
// so the file compiles again. No other code changes.
class FlatMapDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, const DatasetBase* input,
          std::unique_ptr<CapturedFunction> captured_func,
          const DataTypeVector& output_types,
          const std::vector<PartialTensorShape>& output_shapes)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        captured_func_(std::move(captured_func)),
        output_types_(output_types),
        output_shapes_(output_shapes),
        random_access_handler_(ctx, input, *captured_func_) {
    input_->Ref();
    // Global shuffling is only supported when the input is itself
    // random-indexing compatible AND small enough.
    random_indexing_compatible_ = input_->RandomIndexingCompatible();
    if (random_indexing_compatible_.ok() &&
        input_->Cardinality() > kMaxRandomIndexingCardinality) {
      random_indexing_compatible_ = absl::FailedPreconditionError(
          absl::StrCat("The cardinality of the input to ", type_string(),
                       " is too large to support global shuffling. It is ",
                       input_->Cardinality(), ", which is greater than ",
                       kMaxRandomIndexingCardinality));
    }
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  const DataTypeVector& output_dtypes() const override { return output_types_; }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  // Cardinality requires running the map function over every input element,
  // so it is only computed at MODERATE effort or above.
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    if (options.compute_level() <
        CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) {
      return kUnknownCardinality;
    }
    absl::StatusOr<int64_t> cardinality = random_access_handler_.Cardinality();
    if (!cardinality.ok()) {
      LOG(ERROR) << "Unable to compute cardinality for dataset "
                 << DebugString() << " due to error: " << cardinality.status();
      return kUnknownCardinality;
    }
    return *cardinality;
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
    return input_->CheckExternalState();
  }
  absl::Status RandomIndexingCompatible() const override {
    return random_indexing_compatible_;
  }
 protected:
  // Serializes this dataset: input dataset, captured-function arguments, and
  // the `f`/`Targuments` attrs.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    std::vector<Node*> other_arguments;
    DataTypeVector other_arguments_types;
    TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
                                                  &other_arguments_types));
    AttrValue f;
    b->BuildAttrValue(captured_func_->func(), &f);
    AttrValue other_arguments_types_attr;
    b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
    TF_RETURN_IF_ERROR(b->AddDataset(
        this, {std::make_pair(0, input_graph_node)},
        {std::make_pair(1, other_arguments)},
        {std::make_pair(kFunc, f),
         std::make_pair(kTarguments, other_arguments_types_attr)},
        output));
    return absl::OkStatus();
  }
 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
      TF_RETURN_IF_ERROR(
          dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
      return dataset()->captured_func_->Instantiate(
          ctx, &instantiated_captured_func_);
    }
    // Drains the current per-element iterator; when it is exhausted, pulls
    // the next input element and builds a new one, until an output element
    // is produced or the input is exhausted.
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      if (ctx->index_mapper()) {
        // Global-shuffle path.
        return Get(ctx, out_tensors, end_of_sequence);
      }
      mutex_lock l(mu_);
      do {
        if (!input_impl_) {
          *end_of_sequence = true;
          return absl::OkStatus();
        }
        if (current_element_iterator_) {
          bool end_of_element;
          auto nested_ctx = MakeNestedIteratorContext(ctx);
          TF_RETURN_IF_ERROR(current_element_iterator_->GetNext(
              &nested_ctx, out_tensors, &end_of_element));
          ctx->MergeCheckpoint(nested_ctx.checkpoint());
          if (!end_of_element) {
            *end_of_sequence = false;
            return absl::OkStatus();
          }
          // The current element is done: commit the input's checkpoint state
          // and drop the nested iterator's state from the checkpoint.
          ctx->MergeCheckpoint(input_ckpt_.get());
          ctx->PurgeCheckpoint(current_element_iterator_->prefix());
          current_element_iterator_.reset();
        }
        inputs_.clear();
        auto input_ctx = std::make_unique<IteratorContext>(*ctx);
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
        // Buffer the input checkpoint; it is merged into `ctx` only once the
        // element built from `inputs_` has been fully consumed.
        input_ckpt_->Merge(input_ctx->checkpoint());
        if (*end_of_sequence) {
          input_impl_.reset();
          return absl::OkStatus();
        }
        TF_RETURN_IF_ERROR(
            BuildCurrentElementIteratorLocked(ctx, true));
      } while (true);
    }
    // Mirrors GetNextInternal but skips up to `num_to_skip` elements without
    // materializing them.
    Status SkipInternal(IteratorContext* ctx, int num_to_skip,
                        bool* end_of_sequence, int* num_skipped) override {
      mutex_lock l(mu_);
      *num_skipped = 0;
      while (*num_skipped < num_to_skip) {
        if (!input_impl_) {
          *end_of_sequence = true;
          return absl::OkStatus();
        }
        if (current_element_iterator_) {
          bool end_of_element;
          auto nested_ctx = MakeNestedIteratorContext(ctx);
          int last_num_skipped;
          TF_RETURN_IF_ERROR(current_element_iterator_->Skip(
              &nested_ctx, num_to_skip - *num_skipped, &end_of_element,
              &last_num_skipped));
          *num_skipped += last_num_skipped;
          ctx->MergeCheckpoint(nested_ctx.checkpoint());
          if (!end_of_element) {
            if (*num_skipped != num_to_skip) {
              return absl::InternalError(absl::StrFormat(
                  "Expected `num_skipped` and `num_to_skip` to be the same. Got"
                  " %d(num_skipped) and %d(num_to_skip)",
                  *num_skipped, num_to_skip));
            }
            continue;
          }
          ctx->MergeCheckpoint(input_ckpt_.get());
          ctx->PurgeCheckpoint(current_element_iterator_->prefix());
          current_element_iterator_.reset();
        }
        inputs_.clear();
        auto input_ctx = std::make_unique<IteratorContext>(*ctx);
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
        input_ckpt_->Merge(input_ctx->checkpoint());
        if (*end_of_sequence) {
          input_impl_.reset();
          *end_of_sequence = true;
          return absl::OkStatus();
        }
        TF_RETURN_IF_ERROR(
            BuildCurrentElementIteratorLocked(ctx, false));
      }
      *end_of_sequence = false;
      return absl::OkStatus();
    }
    // Random-access (global shuffle) fetch: maps the global element position
    // through the index mapper, finds which flattened input dataset it falls
    // in, and reads from that dataset's iterator.
    absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
                     bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) {
      mutex_lock l(mu_);
      TF_ASSIGN_OR_RETURN(size_t parent_index,
                          ctx->index_mapper()(element_count_));
      FlatMapRandomAccessHandler& random_access =
          dataset()->random_access_handler_;
      absl::StatusOr<int64_t> dataset_index =
          random_access.GetDatasetIndex(parent_index);
      if (absl::IsOutOfRange(dataset_index.status())) {
        *end_of_sequence = true;
        return absl::OkStatus();
      }
      TF_RETURN_IF_ERROR(dataset_index.status());
      if (dataset_iterators_.empty()) {
        // Lazily materialize one iterator per flattened input dataset.
        TF_ASSIGN_OR_RETURN(
            dataset_iterators_,
            random_access.MakeInputIterators(ctx, this, prefix()));
        next_positions_.resize(dataset_iterators_.size(), 0);
        input_element_counts_.resize(dataset_iterators_.size(), 0);
      }
      IteratorContext::Params params(ctx);
      params.index_mapper =
          GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index);
      IteratorContext global_shuffle_ctx(std::move(params));
      TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext(
          &global_shuffle_ctx, out_tensors, end_of_sequence));
      ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint());
      ++element_count_;
      ++input_element_counts_[*dataset_index];
      return absl::OkStatus();
    }
    // Builds an index mapper for one flattened input dataset: scans forward
    // from that dataset's `next_positions_` cursor until it finds a shuffled
    // global index that lands in this dataset, then rebases it to a local
    // index by subtracting the cumulative cardinality of prior datasets.
    IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper,
                                        size_t input_dataset_index)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      absl::StatusOr<int64_t> cardinality =
          dataset()->random_access_handler_.Cardinality();
      return [this, parent_index_mapper = std::move(parent_index_mapper),
              input_dataset_index, cardinality = std::move(cardinality)](
                 size_t element_position) -> absl::StatusOr<size_t> {
        if (!cardinality.ok() || *cardinality < 0) {
          return absl::FailedPreconditionError(
              "Global shuffling requires finite cardinalities.");
        }
        FlatMapRandomAccessHandler& random_access =
            dataset()->random_access_handler_;
        while (next_positions_[input_dataset_index] < *cardinality) {
          size_t index = next_positions_[input_dataset_index];
          if (parent_index_mapper != nullptr) {
            TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index));
          }
          ++next_positions_[input_dataset_index];
          TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index,
                              random_access.GetDatasetIndex(index));
          if (input_dataset_index == shuffled_dataset_index) {
            if (input_dataset_index > 0) {
              TF_ASSIGN_OR_RETURN(
                  int64_t cumulative_cardinality,
                  random_access.CumulativeCardinality(input_dataset_index - 1));
              index -= cumulative_cardinality;
            }
            return index;
          }
        }
        // No more positions map into this dataset; signal end by returning
        // the total cardinality (out of range for the nested iterator).
        return *cardinality;
      };
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeInterleaveManyNode(
          std::move(args),
          {model::MakeNonTunableParameter(kCycleLength, 1)});
    }
    // Writes: exhausted flag; then, if not exhausted, the input iterator
    // state, element index, and (for explicit checkpoints) the buffered
    // `inputs_` tensors plus the current element iterator's state.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override
        TF_LOCKS_EXCLUDED(mu_) {
      TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
          dataset()->captured_func_->CheckExternalState()));
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(prefix(), kElementIndex, element_index_));
        TF_RETURN_IF_ERROR(writer->WriteScalar(
            prefix(), kCurrentElementIteratorUninitialized,
            static_cast<int64_t>(!current_element_iterator_)));
        if (current_element_iterator_ && !ctx->symbolic_checkpoint()) {
          TF_RETURN_IF_ERROR(
              writer->WriteScalar(prefix(), kInputsSize, inputs_.size()));
          for (int i = 0; i < inputs_.size(); i++) {
            TF_RETURN_IF_ERROR(writer->WriteTensor(
                prefix(), strings::StrCat(kInputs, "[", i, "]"), inputs_[i]));
          }
          TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_));
        }
      }
      return absl::OkStatus();
    }
    // Inverse of SaveInternal; delegates to RestoreForGlobalShuffle when a
    // restored element count is present (global-shuffle checkpoints).
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override
        TF_LOCKS_EXCLUDED(mu_) {
      if (ctx->restored_element_count().has_value()) {
        return RestoreForGlobalShuffle(ctx, reader);
      }
      mutex_lock l(mu_);
      input_impl_.reset();
      element_index_ = 0;
      current_element_iterator_.reset();
      inputs_.clear();
      int64_t input_exhausted;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
      if (!static_cast<bool>(input_exhausted)) {
        TF_RETURN_IF_ERROR(
            dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
        {
          int64_t temp;
          TF_RETURN_IF_ERROR(
              reader->ReadScalar(prefix(), kElementIndex, &temp));
          element_index_ = temp;
        }
        int64_t current_element_iterator_uninitialized;
        // NOTE: "&curr" here was previously corrupted to the "¤" glyph by an
        // HTML-entity mangling pass; restored to take the address-of.
        TF_RETURN_IF_ERROR(
            reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized,
                               &current_element_iterator_uninitialized));
        if (!static_cast<bool>(current_element_iterator_uninitialized)) {
          TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader));
        }
      }
      return absl::OkStatus();
    }
    // Rebuilds per-input-dataset iterators and replays the index mapping for
    // the first `element_count_` elements to recover per-dataset positions.
    Status RestoreForGlobalShuffle(IteratorContext* ctx,
                                   IteratorStateReader* reader)
        TF_LOCKS_EXCLUDED(mu_) {
      mutex_lock l(mu_);
      element_count_ = *ctx->restored_element_count();
      FlatMapRandomAccessHandler& random_access =
          dataset()->random_access_handler_;
      TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality());
      if (dataset_iterators_.empty()) {
        TF_ASSIGN_OR_RETURN(
            dataset_iterators_,
            random_access.MakeInputIterators(ctx, this, prefix()));
      }
      input_element_counts_.resize(dataset_iterators_.size(), 0);
      next_positions_.resize(dataset_iterators_.size(), 0);
      std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0);
      std::fill(next_positions_.begin(), next_positions_.end(), 0);
      // Count how many elements each input dataset already emitted.
      for (size_t count = 0; count < element_count_ && count < cardinality;
           ++count) {
        TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count));
        absl::StatusOr<size_t> dataset_index =
            random_access.GetDatasetIndex(parent_index);
        if (absl::IsOutOfRange(dataset_index.status())) {
          break;
        }
        TF_RETURN_IF_ERROR(dataset_index.status());
        ++input_element_counts_[*dataset_index];
        next_positions_[*dataset_index] = count + 1;
      }
      // Restore each input iterator with its own element count.
      for (size_t i = 0; i < dataset_iterators_.size(); ++i) {
        IteratorContext::Params params(ctx);
        params.restored_element_count = input_element_counts_[i];
        IteratorContext ctx_copy(std::move(params));
        TF_RETURN_IF_ERROR(
            RestoreInput(&ctx_copy, reader, dataset_iterators_[i]));
        ctx->MergeCheckpoint(ctx_copy.checkpoint());
      }
      return absl::OkStatus();
    }
   private:
    // Creates the nested iterator for the buffered `inputs_` element and
    // advances `element_index_`. `node` is attached only on the GetNext path
    // so autotuning sees the nested iterator.
    Status BuildCurrentElementIteratorLocked(IteratorContext* ctx,
                                             bool is_get_next)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      std::shared_ptr<model::Node> node = is_get_next ? model_node() : nullptr;
      // NOTE: "&curr" below was previously corrupted to "¤"; restored.
      return MakeIteratorFromInputElement(
          ctx, this, inputs_, element_index_++, *instantiated_captured_func_,
          prefix(), &current_element_iterator_, node);
    }
    // Explicit-checkpoint restore: re-reads the buffered input tensors, then
    // rebuilds and restores the nested iterator.
    Status RestoreCurrentElementIterator(IteratorContext* ctx,
                                         IteratorStateReader* reader)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (ctx->symbolic_checkpoint()) {
        return RestoreCurrentElementIteratorSymbolic(ctx, reader);
      }
      size_t inputs_size;
      {
        int64_t temp;
        TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp));
        inputs_size = static_cast<size_t>(temp);
      }
      inputs_.reserve(inputs_size);
      for (int i = 0; i < inputs_size; i++) {
        inputs_.emplace_back();
        TF_RETURN_IF_ERROR(reader->ReadTensor(
            ctx->flr(), prefix(), strings::StrCat(kInputs, "[", i, "]"),
            &inputs_.back()));
      }
      // Undo the post-increment that BuildCurrentElementIteratorLocked will
      // redo, so the restored element keeps its original index.
      element_index_--;
      TF_RETURN_IF_ERROR(
          BuildCurrentElementIteratorLocked(ctx, false));
      TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
      return absl::OkStatus();
    }
    // Symbolic-checkpoint restore: replays the input iterator to regenerate
    // `inputs_` instead of reading stored tensors. Requires the input to be
    // deterministic.
    Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx,
                                                 IteratorStateReader* reader)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      bool end_of_sequence;
      auto input_ctx = std::make_unique<IteratorContext>(*ctx);
      TF_RETURN_IF_ERROR(
          input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence));
      if (end_of_sequence) {
        return absl::FailedPreconditionError(
            "Unexpected end of sequence while symbolically restoring "
            "FlatMapDataset. Please verify that the input produces data "
            "deterministically.");
      }
      input_ckpt_->Merge(input_ctx->checkpoint());
      element_index_--;
      TF_RETURN_IF_ERROR(
          BuildCurrentElementIteratorLocked(ctx, false));
      TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
      return absl::OkStatus();
    }
    mutex mu_;
    // Index of the next input element to map (sequential path).
    size_t element_index_ TF_GUARDED_BY(mu_) = 0;
    // Checkpoint of the input iterator, deferred until the element built
    // from `inputs_` is fully consumed.
    std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
    std::vector<Tensor> inputs_ TF_GUARDED_BY(mu_);
    std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
    // State below supports the global-shuffle (random access) path.
    size_t element_count_ TF_GUARDED_BY(mu_) = 0;
    std::vector<int64_t> input_element_counts_ TF_GUARDED_BY(mu_);
    std::vector<size_t> next_positions_;
    std::vector<std::unique_ptr<IteratorBase>> dataset_iterators_
        TF_GUARDED_BY(mu_);
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    std::unique_ptr<IteratorBase> current_element_iterator_ TF_GUARDED_BY(mu_);
  };
  const DatasetBase* const input_;
  const std::unique_ptr<CapturedFunction> captured_func_;
  const DataTypeVector output_types_;
  const std::vector<PartialTensorShape> output_shapes_;
  absl::Status random_indexing_compatible_ = absl::OkStatus();
  mutable FlatMapRandomAccessHandler random_access_handler_;
};
// Parses the `f`, `output_types`, and `output_shapes` attrs at kernel
// construction time.
FlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
                                               &func_metadata_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
// Captures `other_arguments` into the function closure and wraps `input`
// into a new FlatMap Dataset (ref-counted; released by the framework).
void FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                   DatasetBase** output) {
  std::unique_ptr<CapturedFunction> captured_func;
  OP_REQUIRES_OK(ctx,
                 CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
                                          &captured_func));
  *output = new Dataset(ctx, input, std::move(captured_func), output_types_,
                        output_shapes_);
}
namespace {
// Register the CPU kernel and exempt the op from input colocation so the
// captured function's inputs need not be colocated with the dataset.
REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU),
                        FlatMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FlatMapDataset");
}
}
}
#include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "flat_map_dataset";
// Test-parameter bundle describing one FlatMapDataset instantiation: the
// upstream dataset params, the mapped function `f` and its library, captured
// arguments, and the expected output dtypes/shapes.
class FlatMapDatasetParams : public DatasetParams {
 public:
  template <typename T>
  FlatMapDatasetParams(T input_dataset_params,
                       std::vector<Tensor> other_arguments,
                       FunctionDefHelper::AttrValueWrapper func,
                       std::vector<FunctionDef> func_lib,
                       DataTypeVector type_arguments,
                       DataTypeVector output_dtypes,
                       std::vector<PartialTensorShape> output_shapes,
                       string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        other_arguments_(std::move(other_arguments)),
        func_(std::move(func)),
        func_lib_(std::move(func_lib)),
        type_arguments_(std::move(type_arguments)) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The op's non-dataset inputs are exactly the captured arguments.
  std::vector<Tensor> GetInputTensors() const override {
    return other_arguments_;
  }
  // Input names: the input dataset followed by other_arguments_0..N-1.
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->emplace_back(FlatMapDatasetOp::kInputDataset);
    for (int i = 0; i < other_arguments_.size(); ++i) {
      input_names->emplace_back(
          absl::StrCat(FlatMapDatasetOp::kOtherArguments, "_", i));
    }
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{"f", func_},
                    {"Targuments", type_arguments_},
                    {"output_shapes", output_shapes_},
                    {"output_types", output_dtypes_},
                    {"metadata", ""}};
    return absl::OkStatus();
  }
  string dataset_type() const override {
    return FlatMapDatasetOp::kDatasetType;
  }
  std::vector<FunctionDef> func_lib() const override { return func_lib_; }
 private:
  std::vector<Tensor> other_arguments_;
  FunctionDefHelper::AttrValueWrapper func_;
  std::vector<FunctionDef> func_lib_;
  DataTypeVector type_arguments_;
};
// Fixture providing the dataset-op test harness (Initialize, Check* helpers).
class FlatMapDatasetOpTest : public DatasetOpsTestBase {};
// Valid params: slices a 3x3x1 tensor and flat-maps each 3x1 slice through
// MakeTensorSliceDataset, yielding the scalars 0..8 as shape-{1} tensors.
FlatMapDatasetParams FlatMapDatasetParams1() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice");
  auto func = FunctionDefHelper::FunctionRef(
      "MakeTensorSliceDataset",
      {{"Toutput_types", DataTypeVector({DT_INT64})},
       {"output_shapes",
        std::vector<PartialTensorShape>({PartialTensorShape({1})})}});
  return FlatMapDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      func,
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// Invalid params: the mapped function (NonZero) does not return a dataset,
// which FlatMap requires.
FlatMapDatasetParams InvalidFlatMapDatasetParams() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice");
  auto func = FunctionDefHelper::FunctionRef( "NonZero",
                                             {{"T", DT_INT64}});
  return FlatMapDatasetParams(std::move(tensor_slice_dataset_params),
                              {},
                              func,
                              {test::function::NonZero()},
                              {},
                              {DT_INT64},
                              {PartialTensorShape({1})},
                              kNodeName);
}
// GetNext should yield 0..8, each as a shape-{1} int64 tensor.
std::vector<GetNextTestCase<FlatMapDatasetParams>> GetNextTestCases() {
  return {
      {FlatMapDatasetParams1(),
       CreateTensors<int64_t>(TensorShape({1}),
                              {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
                         GetNextTestCases())
// Skip cases: {params, num_to_skip, expected num_skipped, expect more data,
// optional expected next element}. The last two skip past the end (9 total).
std::vector<SkipTestCase<FlatMapDatasetParams>> SkipTestCases() {
  return {{FlatMapDatasetParams1(),
           2, 2, true,
           CreateTensors<int64_t>(TensorShape({1}), {{2}})},
          {FlatMapDatasetParams1(),
           4, 4, true,
           CreateTensors<int64_t>(TensorShape({1}), {{4}})},
          {FlatMapDatasetParams1(),
           9, 9, false},
          {FlatMapDatasetParams1(),
           10, 9, false}};
}
ITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
                     SkipTestCases())
// The dataset node carries the node name from the params.
TEST_F(FlatMapDatasetOpTest, DatasetNodeName) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
// type_string() matches the registered op name.
TEST_F(FlatMapDatasetOpTest, DatasetTypeString) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(FlatMapDatasetOp::kDatasetType)));
}
// Dataset-level output dtypes come from the params.
TEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
// Dataset-level output shapes come from the params.
TEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
// FlatMap cardinality is unknown at the default compute level.
TEST_F(FlatMapDatasetOpTest, Cardinality) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
// Iterator-level output dtypes match the params.
TEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
// Iterator-level output shapes match the params.
TEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
// The iterator prefix is derived from the dataset type and input prefix.
TEST_F(FlatMapDatasetOpTest, IteratorPrefix) {
  auto dataset_params = FlatMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
// Save/restore the iterator at breakpoints 0, 4 and 11 and verify the full
// output sequence is still produced exactly once.
std::vector<IteratorSaveAndRestoreTestCase<FlatMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {
      {FlatMapDatasetParams1(),
       {0, 4, 11},
       CreateTensors<int64_t>(TensorShape({1}),
                              {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// A map function that does not return a dataset must surface as an
// InvalidArgument error from GetNext (not at dataset construction time).
TEST_F(FlatMapDatasetOpTest, InvalidMapFunc) {
  auto dataset_params = InvalidFlatMapDatasetParams();
  TF_ASSERT_OK(Initialize(dataset_params));
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  EXPECT_EQ(
      iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
          .code(),
      absl::StatusCode::kInvalidArgument);
}
}
}
} |
47 | #ifndef TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_BACKEND_COMPILER_H_
#define TENSORFLOW_COMPILER_MLIR_TFRT_TRANSFORMS_IFRT_IFRT_BACKEND_COMPILER_H_
#include "absl/status/status.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tfrt/backend_compiler.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tpu_passes.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
namespace tensorflow {
namespace ifrt_serving {
// BackendCompiler implementation that compiles a TF MLIR module for IFRT
// serving. An optional TpuCompiler (not owned) performs legacy TPU op
// conversion before the main pipeline runs.
class IfrtBackendCompiler : public tensorflow::BackendCompiler {
 public:
  explicit IfrtBackendCompiler(TpuCompiler* tpu_compiler = nullptr)
      : tpu_compiler_(tpu_compiler) {}
  // Registers dialects required by the (optional) TPU conversion passes.
  void GetDependentDialects(mlir::DialectRegistry& registry) const override {
    if (tpu_compiler_) {
      tpu_compiler_->RegisterTPUDialects(&registry);
    }
  }
  // Lowers `module` in place and registers the extracted IFRT programs with
  // the model's IfrtModelContext. Defined in the .cc file.
  absl::Status CompileTensorflow(
      tensorflow::tfrt_stub::ModelRuntimeContext& model_context,
      mlir::ModuleOp module) const override;

 private:
  TpuCompiler* tpu_compiler_;  // Not owned; may be null.
};
}
}
#endif
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/visitor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf_ifrt_passes.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tpu_passes.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
// For every function in `module` tagged with "tfrt_ifrt_serving.program_id",
// prunes the module down to that function, renames it to "main", builds an
// IfrtServingExecutable for it and registers it under its program id.
// Returns the registration handles (ownership of the registrations).
absl::StatusOr<std::vector<ServingExecutableRegistry::Handle>>
CompileAndRegisterIfrtPrograms(absl::string_view model_name,
                               mlir::ModuleOp module,
                               IfrtModelContext& ifrt_model_context) {
  std::vector<ServingExecutableRegistry::Handle> handles;
  for (auto func : module.getOps<mlir::func::FuncOp>()) {
    int64_t program_id;
    if (auto attr = func->getAttrOfType<mlir::IntegerAttr>(
            "tfrt_ifrt_serving.program_id")) {
      program_id = attr.getInt();
    } else {
      // Functions without a program id are not IFRT programs; skip them.
      continue;
    }
    mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
    auto entry_function_name = func.getSymName();
    // Clone a submodule containing only the entry function and its deps.
    auto submodule = mlir::TF::CreatePrunedModule(module, entry_function_name);
    if (mlir::failed(submodule)) {
      return diag_handler.ConsumeStatus();
    }
    // The pruned program is a standalone module; drop saved-model semantics
    // and expose the entry point under the conventional name "main".
    submodule->get()->removeAttr("tf_saved_model.semantics");
    submodule->get().walk([&](mlir::func::FuncOp func) {
      if (func.getSymName() == entry_function_name) {
        func.setName("main");
        func.setSymName("main");
        func.setPublic();
      }
    });
    TF_ASSIGN_OR_RETURN(
        auto executable,
        IfrtServingExecutable::Create(
            program_id, model_name, entry_function_name.str(),
            *std::move(submodule), ifrt_model_context.GetClient(),
            &ifrt_model_context.GetThreadPool(),
            &ifrt_model_context.GetLoadedVariableRegistry(),
            &ifrt_model_context.GetRestoreTensorRegistry(),
            ifrt_model_context.checkpoint_loader_queue(),
            ifrt_model_context.GetDeviceMgr(),
            ifrt_model_context.GetShapeRepresentationFn(),
            ifrt_model_context.GetIfrtServingCoreSelector(),
            ifrt_model_context.GetCompilationEnvironmentProto()));
    TF_ASSIGN_OR_RETURN(auto handle, ServingExecutableRegistry::Register(
                                         program_id, std::move(executable)));
    handles.push_back(std::move(handle));
  }
  return handles;
}
// Runs the cluster-to-IFRT-runtime-ops pipeline on `module`, then compiles
// and registers each resulting IFRT program, transferring the registration
// handles into `ifrt_model_context` so they live as long as the model.
// Returns an error if either the pass pipeline or compilation fails.
absl::Status CompileTensorflowForIfrtServing(
    absl::string_view model_name, IfrtModelContext& ifrt_model_context,
    mlir::ModuleOp module) {
  tsl::profiler::TraceMe trace_me("CompileTensorflowForIfrtServing");
  // NOTE: the previous version constructed an unused mlir::Builder here;
  // it has been removed.
  TF_RETURN_IF_ERROR(
      RunClusterToIfrtRuntimeOpsPassPipeline(module, model_name));
  TF_ASSIGN_OR_RETURN(
      auto handles,
      CompileAndRegisterIfrtPrograms(model_name, module, ifrt_model_context));
  for (auto& handle : handles) {
    ifrt_model_context.RegisterHandle(std::move(handle));
  }
  return absl::OkStatus();
}
}
// Full compilation entry point. Pipeline order matters:
//   1. optional legacy TPU op conversion,
//   2. TF2XLA clustering bridge,
//   3. IFRT outlining + per-program compilation/registration,
//   4. removal of the now-outlined program functions,
//   5. final module verification.
// Dumps the module between stages when VLOG(1) is enabled.
absl::Status IfrtBackendCompiler::CompileTensorflow(
    tensorflow::tfrt_stub::ModelRuntimeContext& model_context,
    mlir::ModuleOp module) const {
  auto ifrt_model_context =
      model_context.resource_context().GetResource<IfrtModelContext>(
          kIfrtModelContextName);
  if (!ifrt_model_context.has_value()) {
    return absl::InternalError(
        "Failed to find model context for ifrt serving.");
  }
  mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_before", module);
  }
  if (tpu_compiler_ != nullptr) {
    // Rewrite legacy TPU ops so the downstream bridge can handle them.
    if (mlir::failed(
            tpu_compiler_->RunTPUBackwardCompatConversion(module, {}))) {
      return diag_handler.Combine(
          absl::InternalError("Failed to handle legacy TPU Ops"));
    }
  }
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_after", module);
  }
  TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::RunFunctionTf2xlaClusteringBridge(
      module, true,
      false));
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("before_ifrt_outlining", module);
  }
  TF_RETURN_IF_ERROR(CompileTensorflowForIfrtServing(
      model_context.name(), **ifrt_model_context, module));
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("after_ifrt_outlining", module);
  }
  // The outlined IFRT programs have been compiled and registered above;
  // erase them from the serving module. Collect first, then erase, to avoid
  // mutating the op list while iterating over it.
  llvm::SmallVector<mlir::func::FuncOp> to_erase;
  for (auto func : module.getOps<mlir::func::FuncOp>()) {
    if (func->getAttr("tfrt_ifrt_serving.program_id")) {
      to_erase.push_back(func);
    }
  }
  for (auto func : to_erase) {
    func->erase();
  }
  if (VLOG_IS_ON(1)) {
    tensorflow::DumpMlirOpToFile("after_ifrt_program_removal", module);
  }
  if (mlir::failed(mlir::verify(module))) {
    return diag_handler.ConsumeStatus();
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
// Returns a process-wide, intentionally-leaked thread pool (16 threads)
// used for IFRT sharding work in these tests.
tsl::thread::ThreadPool& GetThreadPool() {
  constexpr int kNumThreads = 16;
  static auto* const pool = new tsl::thread::ThreadPool(
      tsl::Env::Default(), tsl::ThreadOptions(), "IfrtSharding", kNumThreads);
  return *pool;
}
// End-to-end smoke test: parse a clustered MLIR test module, assemble a
// minimal ModelRuntimeContext with an IfrtModelContext resource, and verify
// that CompileTensorflow succeeds.
TEST(IfrtBackendCompilerTest, Basic) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/ifrt_cluster.mlir"));
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  ASSERT_TRUE(mlir_module);
  ASSERT_TRUE(mlir_module.get() != nullptr);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
                          xla::ifrt::test_util::GetClient());
  std::unique_ptr<tensorflow::tfrt_stub::Runtime> runtime =
      tensorflow::tfrt_stub::DefaultTfrtRuntime(1);
  tensorflow::tfrt_stub::GraphExecutionOptions graph_execution_options(
      runtime.get());
  tfrt::ResourceContext resource_context;
  tensorflow::tfrt_stub::ModelRuntimeContext runtime_context(
      &graph_execution_options, "", &resource_context);
  tsl::test_util::MockServingDeviceSelector mock_serving_device_selector;
  IfrtServingCoreSelector core_selector(&mock_serving_device_selector,
                                        client->addressable_device_count());
  // The compiler looks this resource up by name; see kIfrtModelContextName.
  runtime_context.resource_context().CreateResource<IfrtModelContext>(
      "IfrtModelContext", client, &core_selector, &GetThreadPool(),
      nullptr);
  IfrtBackendCompiler compiler;
  TF_ASSERT_OK(compiler.CompileTensorflow(runtime_context, mlir_module.get()));
}
}
}
} |
48 | #ifndef TENSORFLOW_TSL_PROFILER_UTILS_TIMESTAMP_UTILS_H_
#define TENSORFLOW_TSL_PROFILER_UTILS_TIMESTAMP_UTILS_H_
#include <cstdint>
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
void SetSessionTimestamps(uint64_t start_walltime_ns, uint64_t stop_walltime_ns,
tensorflow::profiler::XSpace& space);
}
}
#endif
#include "tsl/profiler/utils/timestamp_utils.h"
#include <cstdint>
#include "absl/log/log.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_utils.h"
namespace tsl {
namespace profiler {
void SetSessionTimestamps(uint64_t start_walltime_ns, uint64_t stop_walltime_ns,
tensorflow::profiler::XSpace& space) {
if (start_walltime_ns != 0 && stop_walltime_ns != 0) {
tsl::profiler::XPlaneBuilder plane(
tsl::profiler::FindOrAddMutablePlaneWithName(
&space, tsl::profiler::kTaskEnvPlaneName));
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStartTime)),
start_walltime_ns);
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStopTime)),
stop_walltime_ns);
} else {
LOG(WARNING) << "Not Setting Session Timestamps, (start_walltime_ns, "
"stop_walltime_ns) : "
<< start_walltime_ns << ", " << stop_walltime_ns;
}
}
}
} | #include "tsl/profiler/utils/timestamp_utils.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_utils.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace tsl {
namespace profiler {
using ::testing::Eq;
// Verifies that SetSessionTimestamps writes both start and stop stats onto
// the task-env plane and that they round-trip through the XPlane visitor.
TEST(TimestampUtilsTest, StartAndStopTimestampAreAdded) {
  XSpace xspace;
  SetSessionTimestamps(1000, 2000, xspace);
  const XPlane* xplane = FindPlaneWithName(xspace, kTaskEnvPlaneName);
  XPlaneVisitor visitor(xplane, {}, {FindTaskEnvStatType});
  auto start_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStartTime);
  auto stop_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStopTime);
  EXPECT_THAT(start_time->IntOrUintValue(), Eq(1000));
  EXPECT_THAT(stop_time->IntOrUintValue(), Eq(2000));
}
}
} |
49 | #ifndef TENSORFLOW_CORE_DATA_SERVICE_BYTE_SIZE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_BYTE_SIZE_H_
#include <cstddef>
#include <ostream>
#include <string>
namespace tensorflow {
namespace data {
// Value type representing a non-negative number of bytes. Supports
// arithmetic (subtraction saturates at zero), comparisons, and
// human-readable formatting via DebugString().
class ByteSize final {
 public:
  constexpr ByteSize() = default;
  constexpr ByteSize(const ByteSize&) = default;
  ByteSize& operator=(const ByteSize&) = default;
  // Factory functions. KB/MB/GB/TB accept fractional values
  // (e.g. ByteSize::KB(1.5)); results are truncated to whole bytes.
  constexpr static ByteSize Bytes(size_t n);
  template <class T>
  constexpr static ByteSize KB(T n);
  template <class T>
  constexpr static ByteSize MB(T n);
  template <class T>
  constexpr static ByteSize GB(T n);
  template <class T>
  constexpr static ByteSize TB(T n);
  ByteSize& operator+=(ByteSize rhs);
  // Saturating subtraction: the result never goes below zero bytes.
  ByteSize& operator-=(ByteSize rhs);
  template <class T>
  ByteSize& operator*=(T rhs);
  template <class T>
  ByteSize& operator/=(T rhs);
  // Conversions to raw byte counts / fractional unit counts.
  size_t ToUnsignedBytes() const;
  double ToDoubleBytes() const;
  double ToDoubleKB() const;
  double ToDoubleMB() const;
  double ToDoubleGB() const;
  double ToDoubleTB() const;
  // Formats with the largest unit that keeps the value >= 1 (e.g. "1.5KB").
  std::string DebugString() const;
 private:
  // NOTE(review): taking `double` here means byte counts above 2^53 may
  // lose precision on the double->size_t round trip -- confirm intended.
  constexpr explicit ByteSize(double bytes) : bytes_(bytes) {}
  size_t bytes_ = 0;
};
// Constructs a ByteSize of exactly `n` bytes.
// (Dropped the stray trailing semicolon after the definition, which
// clang's -Wextra-semi flags.)
constexpr ByteSize ByteSize::Bytes(size_t n) { return ByteSize(n); }
// Unit factories: scale `n` by the binary unit (2^10, 2^20, 2^30, 2^40) and
// delegate to Bytes(). The multiply happens in T's arithmetic, so a
// floating-point `n` keeps its fraction until the final conversion to
// whole bytes.
template <class T>
constexpr ByteSize ByteSize::KB(T n) {
  return ByteSize::Bytes(n * (size_t{1} << 10));
}
template <class T>
constexpr ByteSize ByteSize::MB(T n) {
  return ByteSize::Bytes(n * (size_t{1} << 20));
}
template <class T>
constexpr ByteSize ByteSize::GB(T n) {
  return ByteSize::Bytes(n * (size_t{1} << 30));
}
template <class T>
constexpr ByteSize ByteSize::TB(T n) {
  return ByteSize::Bytes(n * (size_t{1} << 40));
}
inline ByteSize& ByteSize::operator+=(ByteSize rhs) {
  bytes_ += rhs.ToUnsignedBytes();
  return *this;
}
// Subtraction saturates: subtracting more bytes than are held yields zero
// rather than wrapping the unsigned counter.
inline ByteSize& ByteSize::operator-=(ByteSize rhs) {
  if (bytes_ < rhs.ToUnsignedBytes()) {
    bytes_ = 0;
    return *this;
  }
  bytes_ -= rhs.ToUnsignedBytes();
  return *this;
}
// Scale by an arbitrary arithmetic factor; fractional results truncate.
template <class T>
inline ByteSize& ByteSize::operator*=(T rhs) {
  bytes_ *= rhs;
  return *this;
}
template <class T>
inline ByteSize& ByteSize::operator/=(T rhs) {
  bytes_ /= rhs;
  return *this;
}
// Binary operators implemented on pass-by-value copies via the compound
// assignments above.
inline ByteSize operator+(ByteSize lhs, ByteSize rhs) {
  return lhs += rhs;
}
inline ByteSize operator-(ByteSize lhs, ByteSize rhs) {
  return lhs -= rhs;
}
template <class T>
inline ByteSize operator*(ByteSize lhs, T rhs) { return lhs *= rhs; }
template <class T>
inline ByteSize operator*(T lhs, ByteSize rhs) { return rhs *= lhs; }
template <class T>
inline ByteSize operator/(ByteSize lhs, T rhs) { return lhs /= rhs; }
// Ratio of two sizes, computed in double to preserve fractions.
inline double operator/(ByteSize lhs, ByteSize rhs) {
  return lhs.ToDoubleBytes() / rhs.ToDoubleBytes();
}
// Comparisons: everything is derived from operator< and operator== on the
// exact (integral) byte counts.
inline bool operator<(ByteSize lhs, ByteSize rhs) {
  return lhs.ToUnsignedBytes() < rhs.ToUnsignedBytes();
}
inline bool operator>(ByteSize lhs, ByteSize rhs) {
  return rhs < lhs;
}
inline bool operator>=(ByteSize lhs, ByteSize rhs) {
  return !(lhs < rhs);
}
inline bool operator<=(ByteSize lhs, ByteSize rhs) {
  return !(rhs < lhs);
}
inline bool operator==(ByteSize lhs, ByteSize rhs) {
  return lhs.ToUnsignedBytes() == rhs.ToUnsignedBytes();
}
inline bool operator!=(ByteSize lhs, ByteSize rhs) {
  return !(lhs == rhs);
}
// Streams the human-readable form, e.g. "1.5KB".
inline std::ostream& operator<<(std::ostream& os, ByteSize byte_size) {
  return os << byte_size.DebugString();
}
}
}
#endif
#include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include "absl/strings/str_cat.h"
namespace tensorflow {
namespace data {
// Raw byte count and fractional unit conversions (each unit conversion is a
// double ratio against one unit's worth of bytes).
size_t ByteSize::ToUnsignedBytes() const { return bytes_; }
double ByteSize::ToDoubleBytes() const { return static_cast<double>(bytes_); }
double ByteSize::ToDoubleKB() const { return *this / ByteSize::KB(1); }
double ByteSize::ToDoubleMB() const { return *this / ByteSize::MB(1); }
double ByteSize::ToDoubleGB() const { return *this / ByteSize::GB(1); }
double ByteSize::ToDoubleTB() const { return *this / ByteSize::TB(1); }
// Formats using the largest unit whose value is >= 1. Sub-KB values print
// the exact integral byte count; larger values print the (possibly
// fractional) double value with absl::StrCat's default double formatting.
std::string ByteSize::DebugString() const {
  if (*this < ByteSize::KB(1)) {
    return absl::StrCat(ToUnsignedBytes(), "B");
  }
  if (*this < ByteSize::MB(1)) {
    return absl::StrCat(ToDoubleKB(), "KB");
  }
  if (*this < ByteSize::GB(1)) {
    return absl::StrCat(ToDoubleMB(), "MB");
  }
  if (*this < ByteSize::TB(1)) {
    return absl::StrCat(ToDoubleGB(), "GB");
  }
  return absl::StrCat(ToDoubleTB(), "TB");
}
}
} | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Eq;
using ::testing::Not;
// Factory functions agree across units (1024 of one unit == 1 of the next),
// accept fractional inputs, and distinct sizes compare unequal.
TEST(ByteSizeTest, Constructors) {
  EXPECT_EQ(ByteSize::Bytes(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(1), ByteSize::Bytes(1));
  EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::Bytes(1024));
  EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::KB(1));
  EXPECT_EQ(ByteSize::Bytes(size_t{1} << 63), ByteSize::TB(size_t{1} << 23));
  EXPECT_EQ(ByteSize::KB(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::KB(1), ByteSize::Bytes(size_t{1} << 10));
  EXPECT_EQ(ByteSize::KB(0.9), ByteSize::Bytes(1024 * 0.9));
  EXPECT_EQ(ByteSize::KB(1.5), ByteSize::Bytes(1024 * 1.5));
  EXPECT_EQ(ByteSize::KB(1.5), ByteSize::KB(1.5));
  EXPECT_EQ(ByteSize::KB(1024), ByteSize::MB(1));
  EXPECT_EQ(ByteSize::MB(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::MB(1), ByteSize::Bytes(size_t{1} << 20));
  EXPECT_EQ(ByteSize::MB(0.9), ByteSize::Bytes(size_t{1} << 20) * 0.9);
  EXPECT_EQ(ByteSize::MB(1.5), ByteSize::Bytes(size_t{1} << 20) * 1.5);
  EXPECT_EQ(ByteSize::MB(1.5), ByteSize::MB(1.5));
  EXPECT_EQ(ByteSize::MB(1024), ByteSize::GB(1));
  EXPECT_EQ(ByteSize::GB(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::GB(1), ByteSize::Bytes(size_t{1} << 30));
  EXPECT_EQ(ByteSize::GB(0.9), ByteSize::Bytes(size_t{1} << 30) * 0.9);
  EXPECT_EQ(ByteSize::GB(1.5), ByteSize::Bytes(size_t{1} << 30) * 1.5);
  EXPECT_EQ(ByteSize::GB(1.5), ByteSize::GB(1.5));
  EXPECT_EQ(ByteSize::GB(1024), ByteSize::TB(1));
  EXPECT_EQ(ByteSize::TB(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::TB(1), ByteSize::Bytes(size_t{1} << 40));
  EXPECT_EQ(ByteSize::TB(0.9), ByteSize::Bytes(size_t{1} << 40) * 0.9);
  EXPECT_EQ(ByteSize::TB(1.5), ByteSize::Bytes(size_t{1} << 40) * 1.5);
  EXPECT_EQ(ByteSize::TB(1.5), ByteSize::TB(1.5));
  EXPECT_EQ(ByteSize::TB(1024), ByteSize::TB(1024));
  EXPECT_EQ(ByteSize::TB(size_t{1} << 23), ByteSize::TB(size_t{1} << 23));
  EXPECT_THAT(ByteSize::Bytes(0), Not(Eq(ByteSize::Bytes(1))));
  EXPECT_THAT(ByteSize::Bytes(1025), Not(Eq(ByteSize::KB(1))));
  EXPECT_THAT(ByteSize::KB(1), Not(Eq(ByteSize::MB(1))));
  EXPECT_THAT(ByteSize::MB(1), Not(Eq(ByteSize::GB(1))));
  EXPECT_THAT(ByteSize::GB(1), Not(Eq(ByteSize::TB(1))));
  EXPECT_THAT(ByteSize::TB(1), Not(Eq(ByteSize::TB(2))));
}
// All factories and the copy constructor are usable in constexpr context.
TEST(ByteSizeTest, ConstexprConstruction) {
  constexpr ByteSize default_byte_size;
  EXPECT_EQ(default_byte_size, ByteSize::Bytes(0));
  constexpr ByteSize bytes = ByteSize::Bytes(1);
  EXPECT_EQ(bytes, ByteSize::Bytes(1));
  constexpr ByteSize kb = ByteSize::KB(1);
  EXPECT_EQ(kb, ByteSize::KB(1));
  constexpr ByteSize mb = ByteSize::MB(1);
  EXPECT_EQ(mb, ByteSize::MB(1));
  constexpr ByteSize gb = ByteSize::GB(1);
  EXPECT_EQ(gb, ByteSize::GB(1));
  constexpr ByteSize tb = ByteSize::TB(1);
  EXPECT_EQ(tb, ByteSize::TB(1));
  constexpr ByteSize tb_copy(tb);
  EXPECT_EQ(tb_copy, tb);
}
// Exact conversions to unsigned bytes and fractional unit values.
TEST(ByteSizeTest, ConvertToBytes) {
  EXPECT_EQ(ByteSize::Bytes(0).ToUnsignedBytes(), 0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleBytes(), 0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleKB(), 0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleMB(), 0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleGB(), 0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleTB(), 0);
  EXPECT_EQ(ByteSize::Bytes(1).ToUnsignedBytes(), 1);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleBytes(), 1.0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleKB(), 1.0 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleMB(), 1.0 / 1024 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleGB(), 1.0 / 1024 / 1024 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleTB(),
                   1.0 / 1024 / 1024 / 1024 / 1024);
  EXPECT_EQ(ByteSize::KB(0.25).ToUnsignedBytes(), 0.25 * (size_t{1} << 10));
  EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleBytes(), 0.25 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleKB(), 0.25);
  EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleMB(), 0.25 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleGB(), 0.25 / 1024 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleTB(), 0.25 / 1024 / 1024 / 1024);
  EXPECT_EQ(ByteSize::MB(0.5).ToUnsignedBytes(), 0.5 * (size_t{1} << 20));
  EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleBytes(), 0.5 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleKB(), 0.5 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleMB(), 0.5);
  EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleGB(), 0.5 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleTB(), 0.5 / 1024 / 1024);
  EXPECT_EQ(ByteSize::GB(10).ToUnsignedBytes(), 10.0 * (size_t{1} << 30));
  EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleBytes(), 10.0 * 1024 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleKB(), 10.0 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleMB(), 10.0 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleGB(), 10.0);
  EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleTB(), 10.0 / 1024);
  EXPECT_EQ(ByteSize::TB(1024).ToUnsignedBytes(), 1024 * (size_t{1} << 40));
  EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleBytes(),
                   1024.0 * 1024 * 1024 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleKB(),
                   1024.0 * 1024 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleMB(), 1024.0 * 1024 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleGB(), 1024.0 * 1024);
  EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleTB(), 1024.0);
}
// Addition, saturating subtraction, scalar multiply/divide, and size ratios.
TEST(ByteSizeTest, Arithmetics) {
  EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(1), ByteSize::Bytes(1));
  EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::Bytes(512), ByteSize::KB(1));
  EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::KB(1), ByteSize::KB(1.5));
  EXPECT_EQ(ByteSize::KB(0.5) + ByteSize::KB(1), ByteSize::KB(1.5));
  EXPECT_EQ(ByteSize::MB(1) + ByteSize::KB(512), ByteSize::MB(1.5));
  EXPECT_EQ(ByteSize::MB(1) + ByteSize::Bytes(512), ByteSize::Bytes(1049088));
  EXPECT_EQ(ByteSize::GB(0.5) + ByteSize::MB(256) + ByteSize::MB(256),
            ByteSize::GB(1));
  std::vector<ByteSize> GBs(1024, ByteSize::GB(1));
  EXPECT_EQ(absl::c_accumulate(GBs, ByteSize::Bytes(0)), ByteSize::TB(1));
  EXPECT_EQ(ByteSize::TB(1) + ByteSize::TB(0.5) + ByteSize::GB(512),
            ByteSize::TB(2));
  EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(0), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::KB(1) - ByteSize::Bytes(512), ByteSize::KB(0.5));
  EXPECT_EQ(ByteSize::MB(1) - ByteSize::KB(512) - ByteSize::KB(512),
            ByteSize::MB(0));
  EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512), ByteSize::GB(0.5));
  EXPECT_EQ(ByteSize::GB(0.5) - ByteSize::MB(512), ByteSize::GB(0));
  EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512) - ByteSize::MB(512),
            ByteSize::GB(0));
  EXPECT_EQ(ByteSize::TB(1) - ByteSize::GB(512) - ByteSize::GB(512),
            ByteSize::GB(0));
  // Underflow saturates at zero rather than wrapping.
  EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(1), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::GB(1), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::MB(1) - ByteSize::GB(1), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(0) * 0, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::KB(1) * 0, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::MB(1) * 0, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::GB(1) * 0, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::TB(1) * 0, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(1) * 1024, ByteSize::KB(1));
  EXPECT_EQ(ByteSize::KB(1) * 1024, ByteSize::MB(1));
  EXPECT_EQ(ByteSize::MB(1) * 1024, ByteSize::GB(1));
  EXPECT_EQ(ByteSize::GB(1) * 1024, ByteSize::TB(1));
  EXPECT_EQ(ByteSize::Bytes(1) * 1.1, ByteSize::Bytes(1));
  EXPECT_EQ(ByteSize::KB(1) * 1.2, ByteSize::KB(1.2));
  EXPECT_EQ(ByteSize::MB(1) * 1.3, ByteSize::MB(1.3));
  EXPECT_EQ(ByteSize::GB(1) * 1.4, ByteSize::GB(1.4));
  EXPECT_EQ(ByteSize::TB(1) * 1.5, ByteSize::TB(1.5));
  EXPECT_EQ(ByteSize::KB(1) * 0.5, ByteSize::Bytes(512));
  EXPECT_EQ(ByteSize::MB(1) * 0.5, ByteSize::KB(512));
  EXPECT_EQ(ByteSize::GB(1) * 0.5, ByteSize::MB(512));
  EXPECT_EQ(ByteSize::TB(1) * 0.25, ByteSize::GB(256));
  EXPECT_EQ(1024 * ByteSize::Bytes(1), ByteSize::KB(1));
  EXPECT_EQ(1024 * ByteSize::KB(1), ByteSize::MB(1));
  EXPECT_EQ(1024 * ByteSize::MB(1), ByteSize::GB(1));
  EXPECT_EQ(1024 * ByteSize::GB(1), ByteSize::TB(1));
  EXPECT_EQ(0.9 * ByteSize::TB(1), ByteSize::GB(921.6));
  EXPECT_EQ(0 * ByteSize::TB(1), ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::Bytes(0) / 1, ByteSize::Bytes(0));
  EXPECT_EQ(ByteSize::KB(1) / 2, ByteSize::KB(0.5));
  EXPECT_EQ(ByteSize::MB(1) / 2, ByteSize::KB(512));
  EXPECT_EQ(ByteSize::GB(1) / 2, ByteSize::MB(512));
  EXPECT_EQ(ByteSize::TB(1.5) / 2, ByteSize::GB(768));
  EXPECT_EQ(ByteSize::KB(1) / 0.5, ByteSize::KB(2));
  EXPECT_EQ(ByteSize::MB(1) / 0.5, ByteSize::MB(2));
  EXPECT_EQ(ByteSize::GB(1) / 0.5, ByteSize::GB(2));
  EXPECT_EQ(ByteSize::TB(1) / 0.25, ByteSize::TB(4));
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(0) / ByteSize::KB(1), 0.0);
  EXPECT_DOUBLE_EQ(ByteSize::Bytes(1) / ByteSize::TB(1),
                   1.0 / 1024 / 1024 / 1024 / 1024);
  EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::KB(2), 0.5);
  EXPECT_DOUBLE_EQ(ByteSize::KB(512) / ByteSize::MB(1), 0.5);
  EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::MB(1), 1.0 / 1024.0);
  EXPECT_DOUBLE_EQ(ByteSize::MB(1) / ByteSize::GB(1), 1.0 / 1024.0);
  EXPECT_DOUBLE_EQ(ByteSize::GB(1) / ByteSize::TB(1), 1.0 / 1024.0);
}
// Compound assignment operators, including saturating -= at zero.
TEST(ByteSizeTest, Assignments) {
  ByteSize byte_size;
  EXPECT_EQ(byte_size, ByteSize::Bytes(0));
  byte_size = ByteSize::Bytes(1);
  EXPECT_EQ(byte_size, ByteSize::Bytes(1));
  for (size_t i = 0; i < 1023; ++i) {
    byte_size += ByteSize::Bytes(1);
  }
  EXPECT_EQ(byte_size, ByteSize::KB(1));
  for (size_t i = 0; i < 10; ++i) {
    byte_size *= 2;
  }
  EXPECT_EQ(byte_size, ByteSize::MB(1));
  byte_size *= 1024 * 1024;
  EXPECT_EQ(byte_size, ByteSize::TB(1));
  for (size_t i = 0; i < 10; ++i) {
    byte_size /= 2;
  }
  EXPECT_EQ(byte_size, ByteSize::GB(1));
  for (size_t i = 0; i < 4; ++i) {
    byte_size -= ByteSize::MB(256);
  }
  EXPECT_EQ(byte_size, ByteSize::Bytes(0));
  byte_size -= ByteSize::Bytes(1);
  EXPECT_EQ(byte_size, ByteSize::Bytes(0));
}
// Ordering comparisons across all unit combinations.
TEST(ByteSizeTest, Comparisons) {
  EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(0));
  EXPECT_LT(ByteSize::Bytes(0), ByteSize::Bytes(1));
  EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(1));
  EXPECT_LT(ByteSize::Bytes(1), ByteSize::Bytes(1024));
  EXPECT_LE(ByteSize::Bytes(1), ByteSize::Bytes(1024));
  EXPECT_LT(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
  EXPECT_LE(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
  EXPECT_LT(ByteSize::Bytes(1024), ByteSize::KB(1.1));
  EXPECT_LE(ByteSize::Bytes(1024), ByteSize::KB(1.1));
  EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(0));
  EXPECT_LE(ByteSize::KB(1), ByteSize::Bytes(1024));
  EXPECT_LT(ByteSize::KB(0), ByteSize::Bytes(1));
  EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(1));
  EXPECT_LT(ByteSize::KB(0.9), ByteSize::Bytes(1024));
  EXPECT_LE(ByteSize::KB(0.9), ByteSize::Bytes(1024));
  EXPECT_LT(ByteSize::KB(1), ByteSize::KB(1024));
  EXPECT_LE(ByteSize::KB(1), ByteSize::KB(1024));
  EXPECT_LT(ByteSize::KB(1), ByteSize::MB(1));
  EXPECT_LE(ByteSize::KB(1), ByteSize::MB(1));
  EXPECT_LT(ByteSize::KB(1024), ByteSize::MB(1.1));
  EXPECT_LE(ByteSize::KB(1024), ByteSize::MB(1.1));
  EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(0));
  EXPECT_LT(ByteSize::MB(0), ByteSize::Bytes(1));
  EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(1));
  EXPECT_LT(ByteSize::MB(0.9), ByteSize::KB(1024));
  EXPECT_LE(ByteSize::MB(0.9), ByteSize::KB(1024));
  EXPECT_LT(ByteSize::MB(1), ByteSize::MB(1024));
  EXPECT_LE(ByteSize::MB(1), ByteSize::MB(1024));
  EXPECT_LT(ByteSize::MB(1), ByteSize::GB(1));
  EXPECT_LE(ByteSize::MB(1), ByteSize::GB(1));
  EXPECT_LT(ByteSize::MB(1024), ByteSize::GB(1.1));
  EXPECT_LE(ByteSize::MB(1024), ByteSize::GB(1.1));
  EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(0));
  EXPECT_LT(ByteSize::GB(0), ByteSize::Bytes(1));
  EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(1));
  EXPECT_LT(ByteSize::GB(0.9), ByteSize::MB(1024));
  EXPECT_LE(ByteSize::GB(0.9), ByteSize::MB(1024));
  EXPECT_LT(ByteSize::GB(1), ByteSize::GB(1024));
  EXPECT_LE(ByteSize::GB(1), ByteSize::GB(1024));
  EXPECT_LT(ByteSize::GB(1), ByteSize::TB(1));
  EXPECT_LE(ByteSize::GB(1), ByteSize::TB(1));
  EXPECT_LT(ByteSize::GB(1024), ByteSize::TB(1.1));
  EXPECT_LE(ByteSize::GB(1024), ByteSize::TB(1.1));
  EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(0));
  EXPECT_LT(ByteSize::TB(0), ByteSize::Bytes(1));
  EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(1));
  EXPECT_LT(ByteSize::TB(0.9), ByteSize::GB(1024));
  EXPECT_LE(ByteSize::TB(0.9), ByteSize::GB(1024));
  EXPECT_LT(ByteSize::TB(1), ByteSize::TB(1024));
  EXPECT_LE(ByteSize::TB(1), ByteSize::TB(1024));
  EXPECT_LT(ByteSize::TB(1024), ByteSize::TB(1025));
  EXPECT_LE(ByteSize::TB(1024), ByteSize::TB(1025));
  EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
  EXPECT_GT(ByteSize::GB(1), ByteSize::MB(1));
  EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1));
  EXPECT_GT(ByteSize::KB(1), ByteSize::Bytes(1));
  EXPECT_GT(ByteSize::Bytes(1), ByteSize::Bytes(0));
  EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
  EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1) + ByteSize::MB(1) +
                                 ByteSize::KB(1) + ByteSize::Bytes(1));
  EXPECT_GT(ByteSize::GB(1), 0.0000001 * ByteSize::TB(1));
  EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1) * 1023);
  EXPECT_GT(ByteSize::KB(1), ByteSize::KB(3) / 4);
  EXPECT_GT(ByteSize::Bytes(1), ByteSize::TB(0));
  EXPECT_GE(ByteSize::TB(0.5), ByteSize::GB(0.5));
  EXPECT_GE(ByteSize::GB(0.5), ByteSize::MB(0.5));
  EXPECT_GE(ByteSize::MB(0.5), ByteSize::KB(0.5));
  EXPECT_GE(ByteSize::KB(0.5), ByteSize::Bytes(1));
  EXPECT_GE(ByteSize::Bytes(1), ByteSize::Bytes(0));
  EXPECT_GE(ByteSize::TB(0), ByteSize::Bytes(0));
  EXPECT_GE(ByteSize::GB(0), ByteSize::Bytes(0));
  EXPECT_GE(ByteSize::MB(0), ByteSize::Bytes(0));
  EXPECT_GE(ByteSize::KB(0), ByteSize::Bytes(0));
  EXPECT_GE(ByteSize::Bytes(0), ByteSize::Bytes(0));
}
// Human-readable formatting picks the largest unit with value >= 1.
TEST(ByteSizeTest, DebugString) {
  EXPECT_EQ(ByteSize::Bytes(0).DebugString(), "0B");
  EXPECT_EQ(ByteSize::Bytes(1).DebugString(), "1B");
  EXPECT_EQ(ByteSize::Bytes(size_t{1} << 10).DebugString(), "1KB");
  EXPECT_EQ(ByteSize::Bytes(size_t{1} << 20).DebugString(), "1MB");
  EXPECT_EQ(ByteSize::Bytes(size_t{1} << 30).DebugString(), "1GB");
  EXPECT_EQ(ByteSize::Bytes(size_t{1} << 40).DebugString(), "1TB");
  EXPECT_EQ(ByteSize::KB(0.5).DebugString(), "512B");
  EXPECT_EQ(ByteSize::KB(1).DebugString(), "1KB");
  EXPECT_EQ(ByteSize::KB(1.5).DebugString(), "1.5KB");
  EXPECT_EQ(ByteSize::KB(1024).DebugString(), "1MB");
  EXPECT_EQ(ByteSize::KB(1024 * 1024).DebugString(), "1GB");
  EXPECT_EQ(ByteSize::KB(1024 * 1024 * 1024).DebugString(), "1TB");
  EXPECT_EQ(ByteSize::MB(0.5).DebugString(), "512KB");
  EXPECT_EQ(ByteSize::MB(1).DebugString(), "1MB");
  EXPECT_EQ(ByteSize::MB(1.5).DebugString(), "1.5MB");
  EXPECT_EQ(ByteSize::MB(1024).DebugString(), "1GB");
  EXPECT_EQ(ByteSize::MB(1024 * 1024).DebugString(), "1TB");
  EXPECT_EQ(ByteSize::GB(0.5).DebugString(), "512MB");
  EXPECT_EQ(ByteSize::GB(1).DebugString(), "1GB");
  EXPECT_EQ(ByteSize::GB(1.5).DebugString(), "1.5GB");
  EXPECT_EQ(ByteSize::GB(1024).DebugString(), "1TB");
  EXPECT_EQ(ByteSize::TB(0.5).DebugString(), "512GB");
  EXPECT_EQ(ByteSize::TB(1).DebugString(), "1TB");
  EXPECT_EQ(ByteSize::TB(1.5).DebugString(), "1.5TB");
  EXPECT_EQ(ByteSize::TB(1024).DebugString(), "1024TB");
}
}
}
} |
50 | #ifndef THIRD_PARTY_CEL_CPP_COMMON_VALUES_OPTIONAL_VALUE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_VALUES_OPTIONAL_VALUE_H_
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/value_interface.h"
#include "common/value_kind.h"
#include "common/values/opaque_value.h"
#include "internal/status_macros.h"
namespace cel {
class ValueView;
class Value;
class ValueManager;
class OptionalValueInterface;
class OptionalValue;
class OptionalValueView;
// Interface for CEL's `optional` opaque value. Concrete implementations
// provide presence (`HasValue`) and access to the held element (`Value`).
class OptionalValueInterface : public OpaqueValueInterface {
 public:
  using alternative_type = OptionalValue;
  using view_alternative_type = OptionalValueView;

  // The static type is always OptionalType (see GetTypeImpl below).
  OptionalType GetType(TypeManager& type_manager) const {
    return Cast<OptionalType>(GetTypeImpl(type_manager));
  }

  absl::string_view GetTypeName() const final { return "optional_type"; }

  // Returns "optional(<value>)" when present, "optional.none()" otherwise.
  std::string DebugString() const final;

  // True when this optional holds a value.
  virtual bool HasValue() const = 0;

  // Writes a BoolValue into `result`: true only when `other` is also an
  // optional with matching presence and (when present) an equal element.
  absl::Status Equal(ValueManager& value_manager, ValueView other,
                     cel::Value& result) const override;

  // Stores the held element into `scratch`; behavior when empty is
  // implementation-defined (the empty optional yields an error value).
  virtual void Value(cel::Value& scratch) const = 0;

  cel::Value Value() const;

 private:
  Type GetTypeImpl(TypeManager&) const override { return OptionalType(); }

  NativeTypeId GetNativeTypeId() const noexcept final {
    return NativeTypeId::For<OptionalValueInterface>();
  }
};
// Lets As/Cast recognize OptionalValueInterface among value interfaces by
// checking both the opaque kind and the native type id.
template <>
struct SubsumptionTraits<OptionalValueInterface> {
  static bool IsA(const ValueInterface& interface) {
    return interface.kind() == ValueKind::kOpaque &&
           NativeTypeId::Of(interface) ==
               NativeTypeId::For<OptionalValueInterface>();
  }
};
// Owning handle for CEL optional values. Wraps a shared
// OptionalValueInterface and forwards presence/element queries to it.
class OptionalValue final : public OpaqueValue {
 public:
  using interface_type = OptionalValueInterface;
  using view_alternative_type = OptionalValueView;

  // The empty optional ("optional.none()").
  ABSL_ATTRIBUTE_PURE_FUNCTION static OptionalValue None();

  // Creates a present optional holding `value`, allocated via
  // `memory_manager`.
  static OptionalValue Of(MemoryManagerRef memory_manager, cel::Value value);

  explicit OptionalValue(OptionalValueView value);

  // Default-constructs to the empty optional.
  OptionalValue() : OptionalValue(None()) {}
  OptionalValue(const OptionalValue&) = default;
  OptionalValue(OptionalValue&&) = default;
  OptionalValue& operator=(const OptionalValue&) = default;
  OptionalValue& operator=(OptionalValue&&) = default;

  // Implicit conversion from any shared OptionalValueInterface subclass.
  template <typename T, typename = std::enable_if_t<std::is_base_of_v<
                            OptionalValueInterface, std::remove_const_t<T>>>>
  OptionalValue(Shared<T> interface) : OpaqueValue(std::move(interface)) {}

  OptionalType GetType(TypeManager& type_manager) const {
    return (*this)->GetType(type_manager);
  }

  // True when this optional holds a value.
  bool HasValue() const { return (*this)->HasValue(); }

  void Value(cel::Value& result) const;

  cel::Value Value() const;

  // Accessors downcast the underlying OpaqueValue interface to the
  // optional interface.
  const interface_type& operator*() const {
    return Cast<OptionalValueInterface>(OpaqueValue::operator*());
  }

  absl::Nonnull<const interface_type*> operator->() const {
    return Cast<OptionalValueInterface>(OpaqueValue::operator->());
  }

 private:
  friend struct SubsumptionTraits<OptionalValue>;

  // Used by SubsumptionTraits to downcast from an OpaqueValue rvalue.
  explicit OptionalValue(OpaqueValue&& value) noexcept
      : OpaqueValue(std::move(value)) {}
};
// Casting support: identifies optionals among OpaqueValues and downcasts.
// The reinterpret_casts are valid because OptionalValue adds no data
// members to OpaqueValue (see class above).
template <>
struct SubsumptionTraits<OptionalValue> final {
  static bool IsA(const OpaqueValue& value) {
    return NativeTypeId::Of(value) ==
           NativeTypeId::For<OptionalValueInterface>();
  }

  static const OptionalValue& DownCast(const OpaqueValue& value) {
    ABSL_DCHECK(IsA(value));
    return *reinterpret_cast<const OptionalValue*>(std::addressof(value));
  }

  static OptionalValue& DownCast(OpaqueValue& value) {
    ABSL_DCHECK(IsA(value));
    return *reinterpret_cast<OptionalValue*>(std::addressof(value));
  }

  static OptionalValue DownCast(OpaqueValue&& value) {
    ABSL_DCHECK(IsA(value));
    return OptionalValue(std::move(value));
  }
};
// Non-owning view of an OptionalValue. Mirrors OptionalValue's API; the
// viewed value must outlive the view.
class OptionalValueView final : public OpaqueValueView {
 public:
  using interface_type = OptionalValueInterface;
  using alternative_type = OptionalValue;

  // View of the empty optional ("optional.none()").
  ABSL_ATTRIBUTE_PURE_FUNCTION static OptionalValueView None();

  // Default-constructs to a view of the empty optional.
  OptionalValueView() : OptionalValueView(None()) {}

  OptionalValueView(SharedView<const OptionalValueInterface> interface)
      : OpaqueValueView(interface) {}

  // Implicit view of an owning OptionalValue; lifetime-bound to it.
  OptionalValueView(
      const OptionalValue& value ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
      : OpaqueValueView(value) {}

  OptionalValueView& operator=(
      const OptionalValue& value ABSL_ATTRIBUTE_LIFETIME_BOUND) {
    OpaqueValueView::operator=(value);
    return *this;
  }

  // Forbid binding a view to a temporary (would dangle immediately).
  OptionalValueView& operator=(OptionalValue&&) = delete;

  OptionalValueView(const OptionalValueView&) = default;
  OptionalValueView& operator=(const OptionalValueView&) = default;

  OptionalType GetType(TypeManager& type_manager) const {
    return (*this)->GetType(type_manager);
  }

  // True when the viewed optional holds a value.
  bool HasValue() const { return (*this)->HasValue(); }

  void Value(cel::Value& result) const;

  cel::Value Value() const;

  // Accessors downcast the underlying OpaqueValueView interface to the
  // optional interface.
  const interface_type& operator*() const {
    return Cast<OptionalValueInterface>(OpaqueValueView::operator*());
  }

  absl::Nonnull<const interface_type*> operator->() const {
    return Cast<OptionalValueInterface>(OpaqueValueView::operator->());
  }

 private:
  friend struct SubsumptionTraits<OptionalValueView>;

  // Used by SubsumptionTraits to downcast from an OpaqueValueView.
  explicit OptionalValueView(OpaqueValueView value) noexcept
      : OpaqueValueView(value) {}
};
inline OptionalValue::OptionalValue(OptionalValueView value)
: OpaqueValue(value) {}
// Casting support for views: identifies optionals among OpaqueValueViews
// by native type id and rewraps on downcast (views are cheap to copy).
template <>
struct SubsumptionTraits<OptionalValueView> final {
  static bool IsA(OpaqueValueView value) {
    return NativeTypeId::Of(value) ==
           NativeTypeId::For<OptionalValueInterface>();
  }

  static OptionalValueView DownCast(OpaqueValueView value) {
    ABSL_DCHECK(IsA(value));
    return OptionalValueView(value);
  }
};
}
#endif
#include <string>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_kind.h"
namespace cel {
namespace {
// Implementation of a present (non-empty) optional that owns its element.
class FullOptionalValue final : public OptionalValueInterface {
 public:
  explicit FullOptionalValue(cel::Value value) : value_(std::move(value)) {}

  // Always present by construction.
  bool HasValue() const override { return true; }

  // Copies the held element into `result`.
  void Value(cel::Value& result) const override { result = value_; }

 private:
  friend struct NativeTypeTraits<FullOptionalValue>;

  // Type is optional(T) where T is the held element's type.
  Type GetTypeImpl(TypeManager& type_manager) const override {
    return type_manager.CreateOptionalType(value_.GetType(type_manager));
  }

  const cel::Value value_;
};
}
template <>
struct NativeTypeTraits<FullOptionalValue> {
static bool SkipDestructor(const FullOptionalValue& value) {
return NativeType::SkipDestructor(value.value_);
}
};
// Debug representation: "optional(<element>)" when a value is present,
// "optional.none()" otherwise.
std::string OptionalValueInterface::DebugString() const {
  if (!HasValue()) {
    return "optional.none()";
  }
  return absl::StrCat("optional(", Value().DebugString(), ")");
}
// Creates a present optional holding `value`, allocated via
// `memory_manager`. Error and unknown values must not be wrapped in
// optionals (enforced with a DCHECK, i.e. debug builds only).
OptionalValue OptionalValue::Of(MemoryManagerRef memory_manager,
                                cel::Value value) {
  ABSL_DCHECK(value.kind() != ValueKind::kError &&
              value.kind() != ValueKind::kUnknown);
  return OptionalValue(
      memory_manager.MakeShared<FullOptionalValue>(std::move(value)));
}
// Compares this optional with `other`, writing a BoolValue into `result`:
//   - `other` is not an optional          -> false
//   - presence differs                    -> false
//   - both empty                          -> true
//   - both present                        -> element-wise Equal of values
// Returns OkStatus unless the element comparison itself fails.
absl::Status OptionalValueInterface::Equal(ValueManager& value_manager,
                                           ValueView other,
                                           cel::Value& result) const {
  if (auto other_value = As<OptionalValueView>(other);
      other_value.has_value()) {
    if (HasValue() != other_value->HasValue()) {
      result = BoolValueView{false};
      return absl::OkStatus();
    }
    if (!HasValue()) {
      result = BoolValueView{true};
      return absl::OkStatus();
    }
    // Delegate to the held elements' equality. (A dead
    // `return absl::OkStatus();` that followed this statement was removed.)
    return Value().Equal(value_manager, other_value->Value(), result);
  }
  result = BoolValueView{false};
  return absl::OkStatus();
}
} | #include <sstream>
#include <utility>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using testing::TestParamInfo;
using cel::internal::StatusIs;
// Fixture parameterized over memory management strategy (pooling vs
// reference counting); provides shorthands for the empty and full optionals.
class OptionalValueTest : public common_internal::ThreadCompatibleValueTest<> {
 public:
  // The empty optional.
  OptionalValue OptionalNone() { return OptionalValue::None(); }

  // A present optional holding `value`, allocated from the fixture's
  // memory manager.
  OptionalValue OptionalOf(Value value) {
    return OptionalValue::Of(memory_manager(), std::move(value));
  }
};
TEST_P(OptionalValueTest, Kind) {
auto value = OptionalNone();
EXPECT_EQ(value.kind(), OptionalValue::kKind);
EXPECT_EQ(OpaqueValue(value).kind(), OptionalValue::kKind);
EXPECT_EQ(Value(value).kind(), OptionalValue::kKind);
}
TEST_P(OptionalValueTest, Type) {
auto value = OptionalNone();
EXPECT_EQ(value.GetType(type_manager()), OptionalType());
EXPECT_EQ(OpaqueValue(value).GetType(type_manager()), OptionalType());
EXPECT_EQ(Value(value).GetType(type_manager()), OptionalType());
}
TEST_P(OptionalValueTest, DebugString) {
auto value = OptionalNone();
{
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OpaqueValue(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << Value(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OptionalOf(IntValue());
EXPECT_EQ(out.str(), "optional(0)");
}
}
TEST_P(OptionalValueTest, GetSerializedSize) {
EXPECT_THAT(OptionalValue().GetSerializedSize(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, SerializeTo) {
absl::Cord value;
EXPECT_THAT(OptionalValue().SerializeTo(value_manager(), value),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, Serialize) {
EXPECT_THAT(OptionalValue().Serialize(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, GetTypeUrl) {
EXPECT_THAT(OptionalValue().GetTypeUrl(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, ConvertToAny) {
EXPECT_THAT(OptionalValue().ConvertToAny(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, ConvertToJson) {
EXPECT_THAT(OptionalValue().ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueTest, InstanceOf) {
auto value = OptionalNone();
EXPECT_TRUE(InstanceOf<OptionalValue>(value));
EXPECT_TRUE(InstanceOf<OptionalValue>(OpaqueValue(value)));
EXPECT_TRUE(InstanceOf<OptionalValue>(Value(value)));
}
TEST_P(OptionalValueTest, Cast) {
auto value = OptionalNone();
EXPECT_THAT(Cast<OptionalValue>(value), An<OptionalValue>());
EXPECT_THAT(Cast<OptionalValue>(OpaqueValue(value)), An<OptionalValue>());
EXPECT_THAT(Cast<OptionalValue>(Value(value)), An<OptionalValue>());
}
TEST_P(OptionalValueTest, As) {
auto value = OptionalNone();
EXPECT_THAT(As<OptionalValue>(value), Ne(absl::nullopt));
EXPECT_THAT(As<OptionalValue>(OpaqueValue(value)), Ne(absl::nullopt));
EXPECT_THAT(As<OptionalValue>(Value(value)), Ne(absl::nullopt));
}
TEST_P(OptionalValueTest, HasValue) {
auto value = OptionalNone();
EXPECT_FALSE(value.HasValue());
value = OptionalOf(IntValue());
EXPECT_TRUE(value.HasValue());
}
TEST_P(OptionalValueTest, Value) {
auto value = OptionalNone();
auto element = value.Value();
ASSERT_TRUE(InstanceOf<ErrorValue>(element));
EXPECT_THAT(Cast<ErrorValue>(element).NativeValue(),
StatusIs(absl::StatusCode::kFailedPrecondition));
value = OptionalOf(IntValue());
element = value.Value();
ASSERT_TRUE(InstanceOf<IntValue>(element));
EXPECT_EQ(Cast<IntValue>(element), IntValue());
}
INSTANTIATE_TEST_SUITE_P(
OptionalValueTest, OptionalValueTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
OptionalValueTest::ToString);
// Fixture parameterized over memory management strategy; OptionalOf
// returns an owning OptionalValue so tests can take views of it safely.
class OptionalValueViewTest
    : public common_internal::ThreadCompatibleValueTest<> {
 public:
  // View of the empty optional.
  OptionalValueView OptionalNone() { return OptionalValueView::None(); }

  // A present optional holding `value` (owning, so views do not dangle).
  OptionalValue OptionalOf(Value value) {
    return OptionalValue::Of(memory_manager(), std::move(value));
  }
};
TEST_P(OptionalValueViewTest, Kind) {
auto value = OptionalNone();
EXPECT_EQ(value.kind(), OptionalValueView::kKind);
EXPECT_EQ(OpaqueValueView(value).kind(), OptionalValueView::kKind);
EXPECT_EQ(ValueView(value).kind(), OptionalValueView::kKind);
}
TEST_P(OptionalValueViewTest, Type) {
auto value = OptionalNone();
EXPECT_EQ(value.GetType(type_manager()), OptionalType());
EXPECT_EQ(OpaqueValueView(value).GetType(type_manager()), OptionalType());
EXPECT_EQ(ValueView(value).GetType(type_manager()), OptionalType());
}
TEST_P(OptionalValueViewTest, DebugString) {
auto value = OptionalNone();
{
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OpaqueValueView(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << ValueView(value);
EXPECT_EQ(out.str(), "optional.none()");
}
{
std::ostringstream out;
out << OptionalOf(IntValue());
EXPECT_EQ(out.str(), "optional(0)");
}
}
TEST_P(OptionalValueViewTest, GetSerializedSize) {
EXPECT_THAT(OptionalValueView().GetSerializedSize(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, SerializeTo) {
absl::Cord value;
EXPECT_THAT(OptionalValueView().SerializeTo(value_manager(), value),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, Serialize) {
EXPECT_THAT(OptionalValueView().Serialize(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, GetTypeUrl) {
EXPECT_THAT(OptionalValueView().GetTypeUrl(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, ConvertToAny) {
EXPECT_THAT(OptionalValueView().ConvertToAny(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, ConvertToJson) {
EXPECT_THAT(OptionalValueView().ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(OptionalValueViewTest, InstanceOf) {
auto value = OptionalNone();
EXPECT_TRUE(InstanceOf<OptionalValueView>(value));
EXPECT_TRUE(InstanceOf<OptionalValueView>(OpaqueValueView(value)));
EXPECT_TRUE(InstanceOf<OptionalValueView>(ValueView(value)));
}
TEST_P(OptionalValueViewTest, Cast) {
auto value = OptionalNone();
EXPECT_THAT(Cast<OptionalValueView>(value), An<OptionalValueView>());
EXPECT_THAT(Cast<OptionalValueView>(OpaqueValueView(value)),
An<OptionalValueView>());
EXPECT_THAT(Cast<OptionalValueView>(ValueView(value)),
An<OptionalValueView>());
}
TEST_P(OptionalValueViewTest, As) {
auto value = OptionalNone();
EXPECT_THAT(As<OptionalValueView>(value), Ne(absl::nullopt));
EXPECT_THAT(As<OptionalValueView>(OpaqueValueView(value)), Ne(absl::nullopt));
EXPECT_THAT(As<OptionalValueView>(ValueView(value)), Ne(absl::nullopt));
}
TEST_P(OptionalValueViewTest, HasValue) {
auto value_view = OptionalNone();
EXPECT_FALSE(value_view.HasValue());
auto value = OptionalOf(IntValue());
EXPECT_TRUE(OptionalValueView(value).HasValue());
}
TEST_P(OptionalValueViewTest, Value) {
auto value_view = OptionalNone();
auto element = value_view.Value();
ASSERT_TRUE(InstanceOf<ErrorValue>(element));
EXPECT_THAT(Cast<ErrorValue>(element).NativeValue(),
StatusIs(absl::StatusCode::kFailedPrecondition));
auto value = OptionalOf(IntValue());
element = OptionalValueView(value).Value();
ASSERT_TRUE(InstanceOf<IntValue>(element));
EXPECT_EQ(Cast<IntValue>(element), IntValue());
}
INSTANTIATE_TEST_SUITE_P(
OptionalValueViewTest, OptionalValueViewTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
OptionalValueViewTest::ToString);
}
} |
51 | #include "sample2.h"
#include <string.h>
// Returns a heap-allocated copy of `a_c_string` (nullptr clones to
// nullptr). The caller owns the result and must delete[] it.
const char* MyString::CloneCString(const char* a_c_string) {
  if (a_c_string == nullptr) return nullptr;

  // Allocate room for every character plus the terminating '\0', then
  // copy the whole string including the terminator.
  char* const clone = new char[strlen(a_c_string) + 1];
  strcpy(clone, a_c_string);
  return clone;
}
// Replaces the wrapped C string with a clone of `a_c_string` (nullptr
// clears). Cloning happens before delete[] so self-assignment like
// s.Set(s.c_string()) is safe.
void MyString::Set(const char* a_c_string) {
  const char* const temp = MyString::CloneCString(a_c_string);
  delete[] c_string_;
  c_string_ = temp;
} | #include "sample2.h"
#include "gtest/gtest.h"
namespace {
TEST(MyString, DefaultConstructor) {
const MyString s;
EXPECT_STREQ(nullptr, s.c_string());
EXPECT_EQ(0u, s.Length());
}
const char kHelloString[] = "Hello, world!";
TEST(MyString, ConstructorFromCString) {
const MyString s(kHelloString);
EXPECT_EQ(0, strcmp(s.c_string(), kHelloString));
EXPECT_EQ(sizeof(kHelloString) / sizeof(kHelloString[0]) - 1, s.Length());
}
TEST(MyString, CopyConstructor) {
const MyString s1(kHelloString);
const MyString s2 = s1;
EXPECT_EQ(0, strcmp(s2.c_string(), kHelloString));
}
TEST(MyString, Set) {
MyString s;
s.Set(kHelloString);
EXPECT_EQ(0, strcmp(s.c_string(), kHelloString));
s.Set(s.c_string());
EXPECT_EQ(0, strcmp(s.c_string(), kHelloString));
s.Set(nullptr);
EXPECT_STREQ(nullptr, s.c_string());
}
} |
52 | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_PERIODIC_FUNCTION_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_PERIODIC_FUNCTION_H_
#include <functional>
#include <memory>
#include <string>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace serving {
namespace internal {
class PeriodicFunctionTestAccess;
}
// Repeatedly invokes a client function on a dedicated background thread,
// with `interval_micros` measured from the start of one invocation to the
// start of the next. The thread is started by the constructor and stopped
// and joined by the destructor.
class PeriodicFunction {
 public:
  struct Options {
    Options() {}

    // Options for the background thread.
    ThreadOptions thread_options;

    // Name prefix for the background thread.
    string thread_name_prefix = "periodic_function";

    // Environment providing threads, clock and sleep; overridable for
    // testing (e.g. with a fake clock env).
    Env* env = Env::Default();

    // Delay before the first invocation, in microseconds.
    int64_t startup_delay_micros = 0;
  };

  // Starts the loop immediately. `interval_micros` must be >= 0; negative
  // values are reset to 0 with a warning (and DCHECK-fail in debug builds).
  PeriodicFunction(absl::AnyInvocable<void()> function, int64_t interval_micros,
                   const Options& options = Options());

  // Signals the loop to stop and joins the thread; any in-progress
  // invocation of the function runs to completion first.
  ~PeriodicFunction();

 private:
  friend class internal::PeriodicFunctionTestAccess;

  // Notifies stop_thread_ at most once.
  void NotifyStop();

  // Body of the background thread; `start` is the time it began running.
  void RunLoop(int64_t start);

  absl::AnyInvocable<void()> function_;  // The client function to invoke.
  const int64_t interval_micros_;        // Start-to-start call interval.
  const Options options_;

  // Signals the background thread to exit its loop.
  Notification stop_thread_;

  // Thread running RunLoop; destroying it joins the thread.
  std::unique_ptr<Thread> thread_ = nullptr;

  PeriodicFunction(const PeriodicFunction&) = delete;
  void operator=(const PeriodicFunction&) = delete;
};
}
}
#endif
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <algorithm>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace serving {
// Validates the interval in the member initializer (a negative value is
// clamped to 0 with a warning; DCHECK-fails in debug builds) and then
// launches the background thread.
PeriodicFunction::PeriodicFunction(absl::AnyInvocable<void()> function,
                                   const int64_t interval_micros,
                                   const Options& options)
    : function_(std::move(function)),
      interval_micros_([interval_micros]() -> int64 {
        if (interval_micros < 0) {
          const string error = strings::StrCat(
              " The value of 'interval_micros' should be >= 0: ",
              interval_micros, ". ");
          DCHECK(false) << error;
          LOG(WARNING) << error << "Resetting it to 0.";
          return 0;
        }
        return interval_micros;
      }()),
      options_(options) {
  // Start the thread last so every member is fully initialized before
  // RunLoop can touch them.
  thread_.reset(options_.env->StartThread(
      options_.thread_options, options_.thread_name_prefix, [this]() {
        RunLoop(options_.env->NowMicros());
      }));
}
// Signals the loop to stop, then joins by destroying the thread object
// (which blocks until RunLoop returns).
PeriodicFunction::~PeriodicFunction() {
  NotifyStop();
  thread_.reset();
}
// Notifies stop_thread_ at most once; guards against double notification
// (callable from both the destructor and test access).
void PeriodicFunction::NotifyStop() {
  if (!stop_thread_.HasBeenNotified()) {
    stop_thread_.Notify();
  }
}
// Background-thread body: optionally waits out the startup delay, then
// invokes function_ once per interval until stop is signaled. The interval
// is start-to-start; if an invocation overruns it, the next one begins
// immediately.
void PeriodicFunction::RunLoop(const int64_t start) {
  if (options_.startup_delay_micros > 0) {
    const int64_t deadline = start + options_.startup_delay_micros;
    options_.env->SleepForMicroseconds(deadline - start);
  }
  while (!stop_thread_.HasBeenNotified()) {
    VLOG(3) << "Running function.";
    const int64_t begin = options_.env->NowMicros();
    function_();
    // Guard against a clock that appears to move backwards.
    const int64_t end =
        std::max(static_cast<int64_t>(options_.env->NowMicros()), begin);
    const int64_t deadline = begin + interval_micros_;
    if (deadline > end) {
      if (end > begin) {
        VLOG(3) << "Reducing interval_micros from " << interval_micros_
                << " to " << (deadline - end);
      }
      options_.env->SleepForMicroseconds(deadline - end);
    } else {
      VLOG(3) << "Function took longer than interval_micros, so not sleeping";
    }
  }
}
}
} | #include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/fake_clock_env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
// Test-only backdoor (befriended by PeriodicFunction) exposing the private
// NotifyStop so tests can stop the loop without destroying the object.
class PeriodicFunctionTestAccess {
 public:
  explicit PeriodicFunctionTestAccess(PeriodicFunction* periodic_function)
      : periodic_function_(periodic_function) {}

  void NotifyStop() { periodic_function_->NotifyStop(); }

 private:
  PeriodicFunction* const periodic_function_;
};
}
namespace {
using test_util::FakeClockEnv;
// Stops a PeriodicFunction running under a fake clock: waits for the
// background thread to go to sleep, signals stop, then advances the clock
// so the sleeping thread wakes up and observes the notification.
void StopPeriodicFunction(PeriodicFunction* periodic_function,
                          FakeClockEnv* fake_clock_env,
                          const uint64 pf_interval_micros) {
  fake_clock_env->BlockUntilThreadsAsleep(1);
  internal::PeriodicFunctionTestAccess(periodic_function).NotifyStop();
  fake_clock_env->AdvanceByMicroseconds(pf_interval_micros);
}
TEST(PeriodicFunctionTest, ObeyInterval) {
const int64_t kPeriodMicros = 2;
const int kCalls = 10;
int actual_calls = 0;
{
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
for (int i = 0; i < kCalls; ++i) {
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
ASSERT_EQ(actual_calls, kCalls + 1);
}
TEST(PeriodicFunctionTest, ObeyStartupDelay) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
int actual_calls = 0;
{
PeriodicFunction::Options options;
options.startup_delay_micros = kDelayMicros;
FakeClockEnv fake_clock_env(Env::Default());
options.env = &fake_clock_env;
PeriodicFunction periodic_function([&actual_calls]() { ++actual_calls; },
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
EXPECT_EQ(0, actual_calls);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
StopPeriodicFunction(&periodic_function, &fake_clock_env, kDelayMicros);
}
EXPECT_EQ(1, actual_calls);
}
TEST(PeriodicFunctionTest, StartupDelayRace) {
const int64_t kDelayMicros = 10;
const int64_t kPeriodMicros = kDelayMicros / 10;
mutex mu;
int counter = 0;
std::unique_ptr<Notification> listener(new Notification);
FakeClockEnv fake_clock_env(Env::Default());
PeriodicFunction::Options options;
options.env = &fake_clock_env;
options.startup_delay_micros = kDelayMicros;
PeriodicFunction periodic_function(
[&mu, &counter, &listener]() {
mutex_lock l(mu);
counter++;
listener->Notify();
},
kPeriodMicros, options);
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kDelayMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(1, counter);
listener.reset(new Notification);
}
fake_clock_env.BlockUntilThreadsAsleep(1);
fake_clock_env.AdvanceByMicroseconds(kPeriodMicros);
listener->WaitForNotification();
{
mutex_lock l(mu);
EXPECT_EQ(2, counter);
}
StopPeriodicFunction(&periodic_function, &fake_clock_env, kPeriodMicros);
}
TEST(PeriodicFunctionTest, MinInterval) {
PeriodicFunction periodic_function(
[]() { Env::Default()->SleepForMicroseconds(20 * 1000); }, 0);
}
class PeriodicFunctionWithFakeClockEnvTest : public ::testing::Test {
protected:
const int64_t kPeriodMicros = 50;
PeriodicFunctionWithFakeClockEnvTest()
: fake_clock_env_(Env::Default()),
counter_(0),
pf_(
[this]() {
mutex_lock l(counter_mu_);
++counter_;
},
kPeriodMicros, GetPeriodicFunctionOptions()) {}
PeriodicFunction::Options GetPeriodicFunctionOptions() {
PeriodicFunction::Options options;
options.thread_name_prefix = "ignore";
options.env = &fake_clock_env_;
return options;
}
void SetUp() override {
ASSERT_TRUE(AwaitCount(1));
}
void TearDown() override {
StopPeriodicFunction(&pf_, &fake_clock_env_, kPeriodMicros);
}
bool AwaitCount(int expected_counter) {
fake_clock_env_.BlockUntilThreadsAsleep(1);
{
mutex_lock lock(counter_mu_);
return counter_ == expected_counter;
}
}
FakeClockEnv fake_clock_env_;
mutex counter_mu_;
int counter_;
PeriodicFunction pf_;
};
TEST_F(PeriodicFunctionWithFakeClockEnvTest, FasterThanRealTime) {
fake_clock_env_.AdvanceByMicroseconds(kPeriodMicros / 2);
for (int i = 2; i < 7; ++i) {
fake_clock_env_.AdvanceByMicroseconds(
kPeriodMicros);
EXPECT_TRUE(AwaitCount(i));
}
}
TEST_F(PeriodicFunctionWithFakeClockEnvTest, SlowerThanRealTime) {
Env::Default()->SleepForMicroseconds(
125 * 1000);
EXPECT_TRUE(AwaitCount(1));
}
TEST(PeriodicFunctionDeathTest, BadInterval) {
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function([]() {}, -1),
".* should be >= 0");
EXPECT_DEBUG_DEATH(PeriodicFunction periodic_function(
[]() {}, -1, PeriodicFunction::Options()),
".* should be >= 0");
}
}
}
} |
53 | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_GRAPH_VIEW_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_HLO_PROTO_TO_GRAPH_VIEW_H_
#include <string>
#include <string_view>
#include <vector>
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
namespace tensorflow {
namespace profiler {
// Parsed options for the graph-viewer tool (see ParseGraphViewerParams).
struct GraphViewerParams {
  // One of "graph", "short_txt" or "long_txt".
  std::string type;
  // Fields used when type == "graph":
  std::string node_name;
  int graph_width;
  xla::HloRenderOptions render_options;
  xla::RenderedGraphFormat format;
  // Fields used for the text types; `verbose` is true for "long_txt".
  bool verbose;
  bool show_metadata;
};
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
const ToolOptions& options);
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string);
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const xla::HloProto& hlo_proto, const std::string& node_name,
int graph_width, const xla::HloRenderOptions& render_options,
const xla::RenderedGraphFormat& format);
absl::StatusOr<std::string> RenderGraphView(
const xla::HloComputation& computation, absl::string_view label,
const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options = {});
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
const xla::HloInstruction& node, int radius,
xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options = {},
const absl::flat_hash_set<const xla::HloInstruction*>& boundary = {});
absl::StatusOr<std::string> ConvertHloProtoToStringView(
const xla::HloProto& hlo_proto, bool verbose, bool metadata);
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
xla::RenderedGraphFormat format);
std::string WrapDotInHtml(std::string dot);
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view dot)> renderer);
}
}
#endif
#include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/profiler/utils/hlo_module_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_to_module.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::StatusOr;
using ::tensorflow::errors::InvalidArgument;
using ::xla::HloComputation;
using ::xla::HloInstruction;
using ::xla::HloModule;
using ::xla::HloPrintOptions;
using ::xla::HloProto;
using ::xla::HloRenderOptions;
using ::xla::RenderedGraphFormat;
// Clears infeed and outfeed configs on every instruction in the module
// before rendering — presumably to keep the Graphviz output clean, since
// the configs are not needed for visualization (NOTE(review): confirm).
void CleanUpHloModuleForGraphviz(HloModule* hlo_module) {
  for (HloComputation* computation : hlo_module->computations()) {
    for (HloInstruction* inst : computation->instructions()) {
      if (inst->opcode() == xla::HloOpcode::kInfeed) {
        inst->set_infeed_config("");
      } else if (inst->opcode() == xla::HloOpcode::kOutfeed) {
        inst->set_outfeed_config("");
      }
    }
  }
}
// Renders `node_name` from `module` in the requested format. `node_name`
// may name either an HloComputation (rendered whole) or an HloInstruction
// (rendered as a neighborhood of radius `graph_width` around it). Returns
// InvalidArgument when `node_name` is empty or not found in the module.
absl::StatusOr<std::string> Plot(std::unique_ptr<HloModule> module,
                                 const std::string& node_name, int graph_width,
                                 const HloRenderOptions& render_options,
                                 const RenderedGraphFormat& format) {
  if (node_name.empty()) {
    return InvalidArgument("node_name should not be empty");
  }
  // The name may refer to an instruction or a whole computation; try both.
  const HloInstruction* instr = FindInstruction(*module, node_name);
  const HloComputation* comp = FindComputation(*module, node_name);
  if (!instr && !comp) {
    return InvalidArgument(
        absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
                     node_name, "."));
  }
  absl::StatusOr<std::string> graph_handle;
  // Strip infeed/outfeed configs (see CleanUpHloModuleForGraphviz).
  CleanUpHloModuleForGraphviz(module.get());
  if (comp) {
    graph_handle =
        RenderGraphView(*comp, "", comp->parent()->config().debug_options(),
                        format, render_options);
  } else {
    graph_handle = RenderGraphNeighborhoodAround(*instr, graph_width, format,
                                                 render_options);
  }
  // Log the outcome either way; the status itself is returned to the caller.
  if (graph_handle.ok()) {
    VLOG(1) << graph_handle.value();
  } else {
    LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
  }
  return graph_handle;
}
static constexpr char kGraphTypeName[] = "graph";
static constexpr char kShortTxtTypeName[] = "short_txt";
static constexpr char kLongTxtTypeName[] = "long_txt";
static constexpr char kDefaultFormatString[] = "url";
static constexpr int kDefaultWidth = 3;
static constexpr int kDefaultShowMetadata = 0;
static constexpr int kDefaultMergeFusion = 0;
}
// Parses tool options into GraphViewerParams. The mandatory "type" option
// selects the mode: "graph" (rendered graph; reads node_name, graph_width,
// show_metadata, merge_fusion, format) or "short_txt"/"long_txt" (module
// text dump; reads show_metadata, with long_txt meaning verbose). Returns
// InvalidArgument for a missing or unknown type.
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
    const ToolOptions& options) {
  GraphViewerParams params;
  std::optional<std::string> type = GetParam<std::string>(options, "type");
  if (!type.has_value()) {
    return errors::InvalidArgument("Graph viewer must provide a type option.");
  }
  // Graph mode.
  if (type == kGraphTypeName) {
    params.type = type.value();
    if (std::optional<std::string> node_name =
            GetParam<std::string>(options, "node_name")) {
      params.node_name = node_name.value();
    }
    params.graph_width =
        GetParamWithDefault<int>(options, "graph_width", kDefaultWidth);
    params.render_options.show_backend_config = GetParamWithDefault<int>(
        options, "show_metadata", kDefaultShowMetadata);
    // merge_fusion=1 hides fusion subcomputations, hence the negation.
    params.render_options.show_fusion_subcomputations =
        !GetParamWithDefault<int>(options, "merge_fusion", kDefaultMergeFusion);
    params.format = GetRenderFormat(GetParamWithDefault<std::string>(
        options, "format", kDefaultFormatString));
    return params;
  }
  // Text modes: short_txt / long_txt.
  if (type == kShortTxtTypeName || type == kLongTxtTypeName) {
    params.type = type.value();
    params.verbose = (type == kLongTxtTypeName);
    params.show_metadata =
        GetParamWithDefault(options, "show_metadata", kDefaultShowMetadata);
    return params;
  }
  return errors::InvalidArgument("Unknown graph viewer type option: ",
                                 type.value());
}
// Maps a format string ("html", "dot", "url") to the rendered-graph
// format enum. Unrecognized strings log an error and fall back to kUrl.
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string) {
  if (format_string == "html") {
    return xla::RenderedGraphFormat::kHtml;
  }
  if (format_string == "dot") {
    return xla::RenderedGraphFormat::kDot;
  }
  if (format_string != "url") {
    LOG(ERROR) << "Invalid graph format argument: " << format_string
               << ", fallback to default url";
  }
  return xla::RenderedGraphFormat::kUrl;
}
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width,
const HloRenderOptions& render_options, const RenderedGraphFormat& format) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return Plot(std::move(hlo_module), node_name, graph_width, render_options,
format);
}
// Converts an HloProto to its textual HLO form. `verbose` selects the full
// printout (vs the short parsable form) and controls printing of large
// constants; `metadata` toggles op metadata in the output.
absl::StatusOr<std::string> ConvertHloProtoToStringView(
    const HloProto& hlo_proto, bool verbose, bool metadata) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
                      ConvertHloProtoToModule(hlo_proto));
  HloPrintOptions options;
  if (!verbose) {
    options = HloPrintOptions::ShortParsable();
  }
  options.set_print_large_constants(verbose);
  options.set_print_metadata(metadata);
  return hlo_module->ToString(options);
}
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer =
nullptr;
// URL rendering requires a registered renderer; every other format has no
// precondition. Returns FailedPrecondition when kUrl is requested but no
// renderer was registered.
absl::Status CheckPrecondition(xla::RenderedGraphFormat format) {
  const bool needs_url_renderer = format == xla::RenderedGraphFormat::kUrl;
  if (needs_url_renderer && url_renderer == nullptr) {
    return absl::FailedPreconditionError(
        "Can't render as URL; no URL renderer was registered.");
  }
  return absl::OkStatus();
}
// Renders a whole computation: produces DOT via xla::RenderGraph, then
// wraps it into the requested output format (url/html/dot). Fails fast
// when the kUrl format is requested without a registered URL renderer.
absl::StatusOr<std::string> RenderGraphView(
    const xla::HloComputation& computation, absl::string_view label,
    const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
    xla::HloRenderOptions hlo_render_options) {
  auto precheck_status = CheckPrecondition(format);
  if (!precheck_status.ok()) {
    return precheck_status;
  }
  // Always render DOT first; WrapDotInFormat handles the conversion.
  auto rendered_dot =
      xla::RenderGraph(computation, label, debug_options,
                       RenderedGraphFormat::kDot, hlo_render_options);
  if (!rendered_dot.ok()) {
    return rendered_dot.status();
  }
  return WrapDotInFormat(rendered_dot.value(), format);
}
// Renders the subgraph within `radius` hops of `node` (stopping at
// `boundary` instructions), in the requested `format`. DOT is produced
// first and then wrapped, mirroring RenderGraphView.
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
    const xla::HloInstruction& node, int radius,
    xla::RenderedGraphFormat format, xla::HloRenderOptions hlo_render_options,
    const absl::flat_hash_set<const xla::HloInstruction*>& boundary) {
  absl::Status precheck = CheckPrecondition(format);
  if (!precheck.ok()) return precheck;
  absl::StatusOr<std::string> dot = xla::RenderNeighborhoodAround(
      node, radius, RenderedGraphFormat::kDot, hlo_render_options, boundary);
  if (!dot.ok()) return dot.status();
  return WrapDotInFormat(*dot, format);
}
// Converts raw DOT source into the requested output representation:
//   kUrl  -> result of the registered url_renderer (InternalError if none),
//   kHtml -> self-contained HTML page,
//   kDot  -> the DOT text itself.
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
                                            xla::RenderedGraphFormat format) {
  switch (format) {
    case xla::RenderedGraphFormat::kUrl:
      // CheckPrecondition should normally have rejected this earlier;
      // a null renderer here is an internal inconsistency.
      if (url_renderer == nullptr) {
        return absl::InternalError("url_renderer is null");
      }
      return (*url_renderer)(dot);
    case xla::RenderedGraphFormat::kHtml:
      // `dot` is not used after this branch, so move it into the wrapper.
      return WrapDotInHtml(std::move(dot));
    case xla::RenderedGraphFormat::kDot:
      return dot;
  }
  // Defensive fallback: without this, an out-of-range enum value would run
  // off the end of a value-returning function (undefined behavior, and a
  // -Wreturn-type warning).
  return absl::InvalidArgumentError("Invalid rendered graph format");
}
// Embeds `dot` into a self-contained HTML page that renders the graph
// client-side with @hpcc-js/wasm graphviz and svg-pan-zoom, and adds
// "save as HTML/SVG/DOT" links plus a render-time note. The `$DOT`
// placeholder in the template is substituted with the DOT source; every
// byte inside the raw string is emitted verbatim at runtime and must not
// be edited. NOTE(review): the two <script src="https:  lines look like
// truncated URLs (likely stripped by sanitization) — confirm against the
// original source.
std::string WrapDotInHtml(std::string dot) {
return absl::StrReplaceAll(R"html(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style type="text/css">
body {
height: 100vh;
margin: 0;
}
#graph-container {height:95vh;width:100%;padding:10px;display:block;}
#graph-container svg { height: 100% !important; width: 100% !important;}
.node, .cluster {cursor:pointer;}
.cluster:hover, .node:hover {outline: solid 3px black;}
</style>
</head>
<body>
<script src="https:
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
<script src="https:
<div id="graph-container"></div>
<script>
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const data = `$DOT`;
const results = cssregex.exec(data);
let dot_data = data;
let css_data = '';
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_data = data.replace(cssregex, '');
}
var render_start = performance.now()
function add_controls(svg) {
var htmlblob = new Blob([document.documentElement.innerHTML],
{type: 'text/html'});
var savehtml = document.createElement('a');
savehtml.setAttribute('href', URL.createObjectURL(htmlblob));
savehtml.setAttribute('download', 'graph.html');
savehtml.innerHTML = " [Save HTML+SVG] ";
document.body.append(savehtml);
var svgblob = new Blob([svg.outerHTML], {type: 'image/svg'});
var savesvg = document.createElement('a');
savesvg.setAttribute('href', URL.createObjectURL(svgblob));
savesvg.setAttribute('download', 'graph.svg');
savesvg.innerHTML = " [Save SVG] ";
document.body.append(savesvg);
var dotblob = new Blob([data], {type: 'text/dot'});
var savedot = document.createElement('a');
savedot.setAttribute('href', URL.createObjectURL(dotblob));
savedot.setAttribute('download', 'graph.dot');
savedot.innerHTML = " [Save DOT] ";
document.body.append(savedot);
var render_end = performance.now();
var render_note = document.createElement('div')
render_note.innerHTML = 'Rendering took '
+ (render_end - render_start).toFixed(2) + "ms."
document.body.append(render_note);
}
const render_callback = svg => {
const container = document.getElementById('graph-container')
container.innerHTML = `${svg}<style>${css_data}</style>`;
const panZoom = svgPanZoom(container.children[0], {
zoomEnabled: true,
controlIconsEnabled: true,
maxZoom: 200,
minZoom: 0,
});
add_controls(svg);
};
hpccWasm.graphviz.layout(dot_data, "svg", "dot").then(render_callback);
</script>
</body>
</html>
)html",
{
{"$DOT", dot},
});
}
// Installs `renderer` as the process-wide DOT -> URL renderer consumed by
// WrapDotInFormat(kUrl). Repeated registration is allowed: the previous
// renderer is warned about, freed, and replaced (last call wins).
// Not synchronized — presumably called during single-threaded startup;
// TODO(review): confirm there is no concurrent registration.
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
// Free any previously installed renderer before replacing it.
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <string>
#include <variant>
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
// "graph" type: verifies the documented defaults (width 3, url format) and
// that explicitly supplied options override them.
TEST(GraphViewerParamsTest, GraphType) {
ToolOptions options1;
options1["type"] = "graph";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "graph");
EXPECT_EQ(params1.node_name, "");
EXPECT_EQ(params1.graph_width, 3);
EXPECT_EQ(params1.render_options.show_backend_config, false);
EXPECT_EQ(params1.render_options.show_fusion_subcomputations, true);
EXPECT_EQ(params1.format, xla::RenderedGraphFormat::kUrl);
ToolOptions options2;
options2["type"] = "graph";
options2["node_name"] = "fusion.111";
options2["graph_width"] = 10;
options2["show_metadata"] = 1;
options2["merge_fusion"] = 1;
options2["format"] = "html";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "graph");
EXPECT_EQ(params2.node_name, "fusion.111");
EXPECT_EQ(params2.graph_width, 10);
EXPECT_EQ(params2.render_options.show_backend_config, true);
EXPECT_EQ(params2.render_options.show_fusion_subcomputations, false);
EXPECT_EQ(params2.format, xla::RenderedGraphFormat::kHtml);
}
// "short_txt" type: non-verbose text dump; show_metadata is honored.
TEST(GraphViewerParamsTest, ShortTxtType) {
ToolOptions options1;
options1["type"] = "short_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "short_txt");
EXPECT_EQ(params1.verbose, false);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "short_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "short_txt");
EXPECT_EQ(params2.verbose, false);
EXPECT_EQ(params2.show_metadata, true);
}
// "long_txt" type: same as short_txt but verbose is forced on.
TEST(GraphViewerParamsTest, LongTxtType) {
ToolOptions options1;
options1["type"] = "long_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "long_txt");
EXPECT_EQ(params1.verbose, true);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "long_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "long_txt");
EXPECT_EQ(params2.verbose, true);
EXPECT_EQ(params2.show_metadata, true);
}
// Missing and unknown "type" options both yield INVALID_ARGUMENT.
TEST(GraphViewerParamsTest, OtherTypes) {
ToolOptions options1;
EXPECT_THAT(ParseGraphViewerParams(options1),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Graph viewer must provide a type option")));
ToolOptions options2;
options2["type"] = "abcd";
EXPECT_THAT(ParseGraphViewerParams(options2),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Unknown graph viewer type option: abcd")));
}
}
}
}
54 | #ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TOPK_ACCURACY_EVAL_STAGE_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_STAGES_TOPK_ACCURACY_EVAL_STAGE_H_
#include <string>
#include <vector>
#include "tensorflow/lite/tools/evaluation/evaluation_stage.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
namespace tflite {
namespace evaluation {
// Evaluation stage computing top-K accuracy of a classifier's output
// against ground-truth labels. Call SetTaskInfo() before Init(), and
// SetEvalInputs() before each Run().
class TopkAccuracyEvalStage : public EvaluationStage {
public:
explicit TopkAccuracyEvalStage(const EvaluationStageConfig& config)
: EvaluationStage(config) {}
// Validates k, labels, and the model output type/shape set via SetTaskInfo.
TfLiteStatus Init() override;
// Scores one raw model output against the current ground-truth label.
TfLiteStatus Run() override;
// Returns top-k accuracies accumulated over all Run() calls so far.
EvaluationStageMetrics LatestMetrics() override;
~TopkAccuracyEvalStage() override {}
// One-time task setup. `model_output_shape` is borrowed, not owned; it is
// read during Init(), which requires shape [1, all_labels.size()].
void SetTaskInfo(const std::vector<std::string>& all_labels,
TfLiteType model_output_type,
TfLiteIntArray* model_output_shape) {
ground_truth_labels_ = all_labels;
model_output_type_ = model_output_type;
model_output_shape_ = model_output_shape;
}
// Per-sample inputs; both pointers are borrowed and must remain valid
// through the next Run() call.
void SetEvalInputs(void* model_raw_output, std::string* ground_truth_label) {
model_output_ = model_raw_output;
ground_truth_label_ = ground_truth_label;
}
private:
// Increments accuracy_counts_[j] for every j >= rank of the correct label.
void UpdateCounts(const std::vector<int>& topk_indices);
std::vector<std::string> ground_truth_labels_;
TfLiteType model_output_type_ = kTfLiteNoType;
// Borrowed; set via SetTaskInfo().
TfLiteIntArray* model_output_shape_ = nullptr;
// Number of labels; set in Init() from ground_truth_labels_.
int num_total_labels_;
void* model_output_ = nullptr;
std::string* ground_truth_label_ = nullptr;
// Number of Run() calls folded into accuracy_counts_.
int num_runs_;
// accuracy_counts_[i]: runs where the truth appeared within the top (i+1).
std::vector<int> accuracy_counts_;
};
}
}
#endif
#include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <algorithm>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
// Returns indices of the `k` largest entries of `values`, ordered from
// largest to smallest. std::stable_sort keeps results deterministic: ties
// resolve to the lower index. `k` is clamped to values.size() — the
// original `resize(k)` with an oversized k would append value-initialized
// (zero) indices that do not correspond to real top-k entries.
std::vector<int> GetTopKIndices(const std::vector<float>& values, int k) {
  std::vector<int> indices(values.size());
  std::iota(indices.begin(), indices.end(), 0);
  std::stable_sort(indices.begin(), indices.end(),
                   [&values](int a, int b) { return values[a] > values[b]; });
  if (k > static_cast<int>(indices.size())) {
    k = static_cast<int>(indices.size());
  }
  indices.resize(k);
  return indices;
}
}
// Validates configuration and task info before any Run() call:
// k must be present and <= number of labels, labels must be non-empty,
// the model output shape must be [1, num_labels], and the output type
// must be one of float32/uint8/int8.
TfLiteStatus TopkAccuracyEvalStage::Init() {
  num_runs_ = 0;
  const auto& eval_params =
      config_.specification().topk_accuracy_eval_params();
  if (!eval_params.has_k()) {
    LOG(ERROR) << "Value of k not provided for TopkAccuracyEvalStage";
    return kTfLiteError;
  }
  accuracy_counts_.assign(eval_params.k(), 0);
  if (ground_truth_labels_.empty()) {
    LOG(ERROR) << "Ground-truth labels are empty";
    return kTfLiteError;
  }
  num_total_labels_ = ground_truth_labels_.size();
  if (eval_params.k() > num_total_labels_) {
    LOG(ERROR) << "k is too large";
    return kTfLiteError;
  }
  if (model_output_shape_ == nullptr) {
    LOG(ERROR) << "Model output details not correctly set";
    return kTfLiteError;
  }
  const bool shape_is_valid = model_output_shape_->size == 2 &&
                              model_output_shape_->data[0] == 1 &&
                              model_output_shape_->data[1] == num_total_labels_;
  if (!shape_is_valid) {
    LOG(ERROR) << "Invalid model_output_shape_";
    return kTfLiteError;
  }
  switch (model_output_type_) {
    case kTfLiteFloat32:
    case kTfLiteUInt8:
    case kTfLiteInt8:
      break;
    default:
      LOG(ERROR) << "model_output_type_ not supported";
      return kTfLiteError;
  }
  return kTfLiteOk;
}
// Scores one model output (set via SetEvalInputs) against the current
// ground-truth label and folds the result into accuracy_counts_.
// Requires Init() to have succeeded.
TfLiteStatus TopkAccuracyEvalStage::Run() {
  if (!model_output_) {
    LOG(ERROR) << "model_output_ not set correctly";
    return kTfLiteError;
  }
  if (!ground_truth_label_) {
    LOG(ERROR) << "ground_truth_label_ not provided";
    return kTfLiteError;
  }
  auto& params = config_.specification().topk_accuracy_eval_params();
  std::vector<float> probabilities;
  probabilities.reserve(num_total_labels_);
  // One helper for all supported element types replaces three copies of
  // the same loop (and fixes the signed/unsigned loop-counter mismatch of
  // the original: `size_t i < int num_total_labels_`).
  auto append_probabilities = [&](auto* values) {
    for (int i = 0; i < num_total_labels_; ++i) {
      probabilities.push_back(values[i]);
    }
  };
  switch (model_output_type_) {
    case kTfLiteFloat32:
      append_probabilities(static_cast<float*>(model_output_));
      break;
    case kTfLiteUInt8:
      append_probabilities(static_cast<uint8_t*>(model_output_));
      break;
    case kTfLiteInt8:
      append_probabilities(static_cast<int8_t*>(model_output_));
      break;
    default:
      // Unreachable: Init() rejects every other type.
      break;
  }
  std::vector<int> top_k = GetTopKIndices(probabilities, params.k());
  UpdateCounts(top_k);
  return kTfLiteOk;
}
// Reports the top-k accuracies accumulated so far. Returns an empty proto
// when Run() was never invoked, which also avoids dividing by zero.
EvaluationStageMetrics TopkAccuracyEvalStage::LatestMetrics() {
  EvaluationStageMetrics metrics;
  if (num_runs_ == 0) return metrics;
  metrics.set_num_runs(num_runs_);
  auto* topk_metrics =
      metrics.mutable_process_metrics()->mutable_topk_accuracy_metrics();
  const float total_runs = num_runs_;
  for (int count : accuracy_counts_) {
    topk_metrics->add_topk_accuracies(count / total_runs);
  }
  return metrics;
}
// Finds the rank of the ground-truth label within `topk_indices` (if any)
// and increments every accuracy bucket from that rank onward: a hit at
// rank r counts as a hit for all k' > r as well.
void TopkAccuracyEvalStage::UpdateCounts(const std::vector<int>& topk_indices) {
  const int k = topk_indices.size();
  for (int rank = 0; rank < k; ++rank) {
    if (ground_truth_labels_[topk_indices[rank]] != *ground_truth_label_) {
      continue;
    }
    for (int bucket = rank; bucket < k; ++bucket) {
      ++accuracy_counts_[bucket];
    }
    break;
  }
  ++num_runs_;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTopkAccuracyEvalStageName[] = "topk_accuracy_eval_stage";
constexpr int kNumCategories = 1001;
// Builds a stage config with k = 5.
EvaluationStageConfig GetTopkAccuracyEvalStageConfig() {
EvaluationStageConfig config;
config.set_name(kTopkAccuracyEvalStageName);
auto* params =
config.mutable_specification()->mutable_topk_accuracy_eval_params();
params->set_k(5);
return config;
}
// Zeroes the first kNumCategories entries of `array` and returns it.
template <typename T>
T* ResetOutputArray(T array[]) {
for (int i = 0; i < kNumCategories; i++) {
array[i] = 0;
}
return array;
}
// Labels are simply "0".."1000", so label text matches its index.
std::vector<std::string> CreateGroundTruthLabels() {
std::vector<std::string> ground_truth_labels;
ground_truth_labels.reserve(kNumCategories);
for (int i = 0; i < kNumCategories; i++) {
ground_truth_labels.push_back(std::to_string(i));
}
return ground_truth_labels;
}
// Init() must fail when SetTaskInfo was never called.
TEST(TopkAccuracyEvalStage, NoInitializers) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
// Init() must fail when k is absent from the config.
TEST(TopkAccuracyEvalStage, NoK) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()
->mutable_topk_accuracy_eval_params()
->clear_k();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
// Init() must fail with an empty label set.
TEST(TopkAccuracyEvalStage, NoGroundTruthLabels) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = {};
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
// Init() must fail when k exceeds the number of labels.
TEST(TopkAccuracyEvalStage, KTooLarge) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()->mutable_topk_accuracy_eval_params()->set_k(
10000);
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
// Init() must fail when output shape is not [1, num_labels].
TEST(TopkAccuracyEvalStage, WeirdModelOutputShape) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
// Init() must fail for output types other than float32/uint8/int8.
TEST(TopkAccuracyEvalStage, UnsupportedModelOutputType) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteComplex64;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
// Run() must fail when SetEvalInputs was never called.
TEST(TopkAccuracyEvalStage, NoInputs) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
// Run() must fail when the ground-truth pointer is null.
TEST(TopkAccuracyEvalStage, InvalidGroundTruth) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[0] = 0.8;
stage.SetEvalInputs(tensor, nullptr);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
// Truth at rank 5 counts only toward the k=5 bucket; a second run with
// truth at rank 4 then contributes to buckets 4 and 5.
TEST(TopkAccuracyEvalStage, FloatTest_CorrectLabelsAtLastIndices) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[4] = 0.9;
tensor[3] = 0.8;
tensor[2] = 0.7;
tensor[1] = 0.6;
tensor[0] = 0.5;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
for (int i = 0; i < 4; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
EXPECT_FLOAT_EQ(0.5, accuracy_metrics.topk_accuracies(3));
for (int i = 0; i < 3; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
}
// Type-parameterized happy path: when the truth is always the top-1
// prediction, every accuracy bucket stays at 1.0 across runs.
class CorrectTopkAccuracyEvalTest : public ::testing::Test {
protected:
template <typename T>
void VerifyCorrectBehaviorForType(T ground_truth_0_value,
T ground_truth_1_value,
TfLiteType model_output_type) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
stage.SetTaskInfo(ground_truth_labels, model_output_type,
model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(0, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_EQ(0, accuracy_metrics.topk_accuracies_size());
T array[kNumCategories];
T* tensor = ResetOutputArray(array);
tensor[0] = ground_truth_0_value;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
tensor[1] = ground_truth_1_value;
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
}
};
TEST_F(CorrectTopkAccuracyEvalTest, FloatTest) {
VerifyCorrectBehaviorForType(static_cast<float>(0.8), static_cast<float>(0.9),
kTfLiteFloat32);
}
TEST_F(CorrectTopkAccuracyEvalTest, Int8Test) {
VerifyCorrectBehaviorForType(static_cast<int8_t>(1), static_cast<int8_t>(2),
kTfLiteInt8);
}
TEST_F(CorrectTopkAccuracyEvalTest, UInt8Test) {
VerifyCorrectBehaviorForType(static_cast<uint8_t>(1), static_cast<uint8_t>(2),
kTfLiteUInt8);
}
}
}
}
55 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#include <string>
#include "absl/strings/string_view.h"
namespace tflite::acceleration {
std::string CanonicalizeValue(absl::string_view value);
std::string CanonicalizeValueWithKey(absl::string_view key,
absl::string_view value);
}
#endif
#include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <iterator>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
// Normalizes one character for canonicalization: spaces and hyphens
// collapse to underscores; everything else is ASCII-lowercased.
inline char ascii_normalise(const unsigned char c) {
  const bool is_separator = (c == ' ') || (c == '-');
  return is_separator ? '_' : absl::ascii_tolower(c);
}
}
// Returns `value` with every character normalized: lowercased, with ' '
// and '-' replaced by '_'.
std::string CanonicalizeValue(absl::string_view value) {
  std::string output;
  output.reserve(value.size());
  for (const char c : value) {
    output.push_back(
        tflite::acceleration::ascii_normalise(static_cast<unsigned char>(c)));
  }
  return output;
}
// Canonicalizes `value`, then applies a key-specific rewrite: for GPU-model
// values, an ANGLE-on-Vulkan Samsung Xclipse string is trimmed to its stable
// prefix (driver version suffix dropped). Other keys return the plain
// canonical form.
std::string CanonicalizeValueWithKey(absl::string_view key,
                                     absl::string_view value) {
  std::string output = CanonicalizeValue(value);
  if (key == kGPUModel) {
    std::string gpu_output;
    const bool matched = RE2::FullMatch(
        output,
        R"((angle_\(samsung_xclipse_[0-9]*\)_on_vulkan).*$)",
        &gpu_output);
    if (matched) return gpu_output;
  }
  return output;
}
} | #include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite::acceleration {
namespace {
TEST(CanonicalizeValue, CharactersAreLowercased) {
EXPECT_EQ(CanonicalizeValue("hElLo"), "hello");
}
TEST(CanonicalizeValue, HyphensAreReplaced) {
EXPECT_EQ(CanonicalizeValue("-"), "_");
}
TEST(CanonicalizeValue, SpacesAreReplaced) {
EXPECT_EQ(CanonicalizeValue(" "), "_");
}
// Characters below 'A' (other than ' ' and '-') must pass through
// unchanged.
TEST(CanonicalizeValue, OtherSpecialCharactersAreUnaffected) {
for (unsigned char c = 0; c < 65; ++c) {
if (c == ' ' || c == '-') continue;
// NOTE(review): this initializer-list builds the TWO-character string
// {'\x01', c}, not a length-1 string of c — std::string(1, c) was
// probably intended. The test still passes because both characters are
// unaffected by normalization; confirm intent.
std::string s = {1, static_cast<char>(c)};
EXPECT_EQ(CanonicalizeValue(s), s);
}
}
// GPU-model key: ANGLE/Xclipse strings are trimmed to the stable prefix.
TEST(CanonicalizeValue, SamsungXclipseGpuNormalized) {
EXPECT_EQ(CanonicalizeValueWithKey(
kGPUModel, "ANGLE (Samsung Xclipse 920) on Vulkan 1.1.179"),
"angle_(samsung_xclipse_920)_on_vulkan");
}
}
}
56 | #ifndef TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#define TENSORFLOW_C_EAGER_C_API_EXPERIMENTAL_READER_H_
#include "tensorflow/c/eager/c_api.h"
#ifdef __cplusplus
extern "C" {
#endif
// Opaque handle for reading monitoring counter cells through the C API.
typedef struct TFE_MonitoringCounterReader TFE_MonitoringCounterReader;
// Creates a reader bound to the counter registered under `name`.
// Ownership transfers to the caller; no free function is declared here —
// TODO(review): confirm intended lifetime (readers appear to live for the
// process in existing usage).
TF_CAPI_EXPORT extern TFE_MonitoringCounterReader*
TFE_MonitoringNewCounterReader(const char* name);
// Reads the current value of a 0-label counter.
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter0(
TFE_MonitoringCounterReader*);
// Reads the current value of the cell keyed by `label_value` in a 1-label
// counter.
TF_CAPI_EXPORT extern int64_t TFE_MonitoringReadCounter1(
TFE_MonitoringCounterReader*, const char* label_value);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/c/eager/c_api_experimental_reader.h"
#include "tensorflow/c/eager/tfe_monitoring_reader_internal.h"
// Reads the counter cell identified by `labels...` (empty pack for a
// 0-label counter) by delegating to the wrapped `counter` member.
template <typename... LabelType>
int64_t TFE_MonitoringCounterReader::Read(const LabelType&... labels) {
return counter->Read(labels...);
}
// C API entry point: allocates a reader bound to the counter named `name`.
// The caller owns the returned object.
TFE_MonitoringCounterReader* TFE_MonitoringNewCounterReader(const char* name) {
  return new TFE_MonitoringCounterReader(name);
}
// C API entry point: returns the current value of a 0-label counter.
int64_t TFE_MonitoringReadCounter0(TFE_MonitoringCounterReader* cell_reader) {
  return cell_reader->Read();
}
// C API entry point: returns the current value of the cell keyed by `label`
// in a 1-label counter.
int64_t TFE_MonitoringReadCounter1(TFE_MonitoringCounterReader* cell_reader,
const char* label) {
int64_t result = cell_reader->Read(label);
return result;
} | #include "tensorflow/c/eager/c_api_experimental_reader.h"
#include <cstdint>
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Helpers declared up front, defined after the tests below.
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name);
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label);
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta = 1);
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta = 1);
// A reader on a 0-label counter observes a single increment.
TEST(CAPI, MonitoringCellReader0) {
auto counter_name = "test/counter0";
auto* counter = CreateCounter0(counter_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter0(counter);
int64_t actual = TFE_MonitoringReadCounter0(reader);
CHECK_EQ(actual, 1);
}
// A reader on a 1-label counter observes an increment on that label's cell.
TEST(CAPI, MonitoringCellReader1) {
auto counter_name = "test/counter1";
auto label_name = "test/label";
auto* counter = CreateCounter1(counter_name, label_name);
auto* reader = TFE_MonitoringNewCounterReader(counter_name);
IncrementCounter1(counter, label_name);
int64_t actual = TFE_MonitoringReadCounter1(reader, label_name);
CHECK_EQ(actual, 1);
}
// Registers a 0-label counter; the TF_Status is discarded (best-effort).
TFE_MonitoringCounter0* CreateCounter0(const char* counter_name) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter0(counter_name, status, "description");
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter0(TFE_MonitoringCounter0* counter, int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter0(counter);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
// Registers a 1-label counter; the TF_Status is discarded (best-effort).
TFE_MonitoringCounter1* CreateCounter1(const char* counter_name,
const char* label) {
TF_Status* status = TF_NewStatus();
auto* counter =
TFE_MonitoringNewCounter1(counter_name, status, "description", label);
TF_DeleteStatus(status);
return counter;
}
void IncrementCounter1(TFE_MonitoringCounter1* counter, const char* label,
int64_t delta) {
auto* cell = TFE_MonitoringGetCellCounter1(counter, label);
TFE_MonitoringCounterCellIncrementBy(cell, delta);
}
}
}
57 | #ifndef TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
#define TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
#include <string>
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace cc_op {
// Generates the C++ op-wrapper header (`dot_h_fname`) and implementation
// (`dot_cc_fname`) for the ops in `ops`, consulting `api_def_map` for
// per-op API definitions.
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& dot_h_fname, const string& dot_cc_fname);
}
}
#endif
#include "tensorflow/cc/framework/cc_op_gen.h"
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/cc/framework/cc_op_gen_util.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace cc_op {
namespace {
// Column limit used when word-wrapping generated declarations (WordWrap).
const int kRightMargin = 79;
// Builds a constructor declaration for `op_info`, e.g.
//   <prefix>OpName(arg_type arg_name, ..., const OpName::Attrs& attrs)
// word-wrapped to kRightMargin columns. The trailing Attrs parameter is
// emitted only when `include_attr` is set and the op has optional attrs.
string GetConstructorDecl(const OpInfo& op_info, StringPiece op_name_prefix,
                          bool include_attr) {
  const string prefix = strings::StrCat(op_name_prefix, op_info.op_name, "(");
  string param_list;
  for (size_t i = 0; i < op_info.arg_types.size(); ++i) {
    if (i > 0) strings::StrAppend(&param_list, ", ");
    strings::StrAppend(&param_list, op_info.arg_types[i], " ",
                       op_info.arg_names[i]);
  }
  if (include_attr && op_info.has_optional_attrs) {
    strings::StrAppend(&param_list, ", const ", op_info.op_name,
                       "::Attrs& attrs");
  }
  strings::StrAppend(&param_list, ")");
  return WordWrap(prefix, param_list, kRightMargin);
}
// Emits the public C++ class declaration for one op into the generated
// header `h`: optional Attrs struct, constructors, output conversion
// operators/accessors, static Attrs-setter shortcuts, type aliases, and
// public data members.
void WriteClassDecl(const OpInfo& op_info, WritableFile* h) {
string class_decl = op_info.comment;
strings::StrAppend(&class_decl, "class ", op_info.op_name, " {\n");
strings::StrAppend(&class_decl, " public:\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, op_info.GetOpAttrStruct());
}
// Constructor without attrs; plus an overload taking Attrs when the op
// has optional attributes.
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", false),
";\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", true),
";\n");
}
// Conversion helpers depend on output arity: no outputs -> convertible to
// Operation; single list output -> operator[]; single plain output ->
// convertible to Output/Input plus node() accessor.
if (op_info.output_types.empty()) {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Operation() const { "
"return operation; }\n");
} else if (op_info.output_types.size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(&class_decl,
" ::tensorflow::Output operator[](size_t index) "
"const { return ",
op_info.output_names[0], "[index]; }\n\n");
} else {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Output() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" operator ::tensorflow::Input() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" ::tensorflow::Node* node() const { return ",
op_info.output_names[0], ".node(); }\n");
}
}
// Static shortcut per optional attr (skips attrs inferred from inputs and
// attrs without defaults), e.g. `static Attrs Foo(int64 x)`. A trailing
// underscore avoids collisions with the class name or "Attrs".
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, "\n");
for (int i = 0; i < op_info.graph_op_def.attr_size(); ++i) {
const auto& attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if ((op_info.inferred_input_attrs.find(attr.name()) !=
op_info.inferred_input_attrs.end()) ||
!api_def_attr.has_default_value()) {
continue;
}
const auto entry = AttrTypeName(attr.type());
const auto attr_type_name = entry.first;
const bool use_const = entry.second;
const string camel_case_name = ToCamelCase(api_def_attr.rename_to());
const string suffix =
(camel_case_name == op_info.op_name || camel_case_name == "Attrs")
? "_"
: "";
const string attr_func_def = strings::StrCat(
camel_case_name, suffix, "(", use_const ? "const " : "",
attr_type_name, use_const ? "&" : "");
strings::StrAppend(&class_decl, " static Attrs ", attr_func_def,
" x) {\n");
strings::StrAppend(&class_decl, " return Attrs().", camel_case_name,
suffix, "(x);\n");
strings::StrAppend(&class_decl, " }\n");
}
}
// Public members: the underlying Operation and one member per output.
strings::StrAppend(&class_decl, "\n Operation operation;\n");
for (int i = 0; i < op_info.output_types.size(); ++i) {
strings::StrAppend(&class_decl, " ", op_info.output_types[i], " ",
op_info.output_names[i], ";\n");
}
strings::StrAppend(&class_decl, "};\n");
// Deprecated/aliased op names become typedefs of the generated class.
if (!op_info.aliases.empty()) {
for (const auto& alias : op_info.aliases) {
strings::StrAppend(&class_decl, "typedef ", op_info.op_name, " ", alias,
";\n");
}
}
strings::StrAppend(&class_decl, "\n");
TF_CHECK_OK(h->Append(class_decl));
}
// Appends to `out` the constructor code that populates the generated
// wrapper's output members from the finalized node `ret`:
// - zero outputs: only `operation` is set;
// - one output: assigned directly (a list output gathers every node output);
// - multiple outputs: NameRangesForNode() maps each named output arg to its
//   [first, second) output-index range.
// Fix: removed the unused local `return_on_error` (it was computed but never
// referenced in this function).
void GetOutput(const OpInfo& op_info, string* out) {
  // The first wrapper argument is always the Scope; errors are surfaced
  // through it in the emitted code.
  const string scope_str = op_info.arg_names[0];
  strings::StrAppend(out, " this->operation = Operation(ret);\n");
  if (op_info.graph_op_def.output_arg_size() == 0) {
    strings::StrAppend(out, " return;\n");
    return;
  }
  if (op_info.graph_op_def.output_arg_size() == 1) {
    // Single output: no need to query output name ranges.
    if (op_info.is_list_output[0]) {
      strings::StrAppend(out,
                         " for (int32 i = 0; i < ret->num_outputs(); ++i)\n");
      strings::StrAppend(out, " this->", op_info.output_names[0],
                         ".push_back(Output(ret, i));\n");
    } else {
      strings::StrAppend(out, " this->", op_info.output_names[0],
                         " = Output(ret, 0);\n");
    }
    return;
  }
  // Multiple outputs: emit code that resolves each output arg's index
  // range at runtime via NameRangesForNode.
  strings::StrAppend(out, " ::tensorflow::NameRangeMap _outputs_range;\n");
  strings::StrAppend(out,
                     " ::tensorflow::Status _status_ = "
                     "::tensorflow::NameRangesForNode(*ret, ret->op_def(), "
                     "nullptr, &_outputs_range);\n");
  strings::StrAppend(out, " if (!_status_.ok()) {\n", " ", scope_str,
                     ".UpdateStatus(_status_);\n", " return;\n");
  strings::StrAppend(out, " }\n\n");
  for (int i = 0; i < op_info.graph_op_def.output_arg_size(); ++i) {
    const string arg_range = strings::StrCat(
        "_outputs_range[\"", op_info.graph_op_def.output_arg(i).name(), "\"]");
    if (op_info.is_list_output[i]) {
      strings::StrAppend(out, " for (int32 i = ", arg_range, ".first; i < ",
                         arg_range, ".second; ++i)\n");
      strings::StrAppend(out, " this->", op_info.output_names[i],
                         ".push_back(Output(ret, i));\n");
    } else {
      strings::StrAppend(out, " this->", op_info.output_names[i],
                         " = Output(ret, ", arg_range, ".first);\n");
    }
  }
}
// Emits the body of the op wrapper's main constructor: converts inputs to
// NodeOuts, assembles a NodeBuilder with inputs and attributes, finalizes
// it into the graph, runs shape inference, and fills in the outputs.
string GetConstructorBody(const OpInfo& op_info) {
  // The first wrapper argument is always the Scope.
  const string& scope = op_info.arg_names[0];
  const string bail_out =
      strings::StrCat("if (!", scope, ".ok()) return;");
  string result;
  strings::StrAppend(&result, " ", bail_out, "\n");
  // Convert each input argument into a NodeBuilder input.
  for (int arg_idx = 0; arg_idx < op_info.graph_op_def.input_arg_size();
       ++arg_idx) {
    const auto& arg = op_info.graph_op_def.input_arg(arg_idx);
    const auto& api_def_arg = op_info.api_def.in_arg(arg_idx);
    strings::StrAppend(
        &result, " auto _", api_def_arg.rename_to(), " = ::tensorflow::ops::",
        ArgIsList(arg) ? "AsNodeOutList" : "AsNodeOut", "(", scope, ", ",
        AvoidCPPKeywords(api_def_arg.rename_to()), ");\n");
    strings::StrAppend(&result, " ", bail_out, "\n");
  }
  strings::StrAppend(&result, " ::tensorflow::Node* ret;\n");
  strings::StrAppend(&result, " const auto unique_name = ", scope,
                     ".GetUniqueNameForOp(\"", op_info.op_name, "\");\n");
  strings::StrAppend(
      &result, " auto builder = ::tensorflow::NodeBuilder(unique_name, \"",
      op_info.graph_op_def.name(), "\")\n");
  const string indent = " ";
  for (int in_idx = 0; in_idx < op_info.api_def.in_arg_size(); ++in_idx) {
    strings::StrAppend(&result, indent, ".Input(_",
                       op_info.api_def.in_arg(in_idx).rename_to(), ")\n");
  }
  for (int attr_idx = 0; attr_idx < op_info.api_def.attr_size(); ++attr_idx) {
    const auto& graph_attr = op_info.graph_op_def.attr(attr_idx);
    const auto& api_def_attr = op_info.api_def.attr(attr_idx);
    // Attributes inferred from inputs are filled in by NodeBuilder itself.
    if (op_info.inferred_input_attrs.find(api_def_attr.name()) !=
        op_info.inferred_input_attrs.end()) {
      continue;
    }
    // Optional attrs come from the Attrs struct; required ones are
    // constructor parameters.
    const string attr_value =
        api_def_attr.has_default_value()
            ? strings::StrCat("attrs.", api_def_attr.rename_to(), "_")
            : AvoidCPPKeywords(api_def_attr.rename_to());
    strings::StrAppend(&result, indent, ".Attr(\"", graph_attr.name(), "\", ",
                       attr_value, ")\n");
  }
  strings::StrAppend(&result, " ;\n");
  strings::StrAppend(&result, " ", scope, ".UpdateBuilder(&builder);\n");
  strings::StrAppend(&result, " ", scope, ".UpdateStatus(builder.Finalize(",
                     scope, ".graph(), &ret));\n");
  strings::StrAppend(&result, " ", bail_out, "\n");
  strings::StrAppend(&result, " ", scope, ".UpdateStatus(", scope,
                     ".DoShapeInference(ret));\n");
  GetOutput(op_info, &result);
  return result;
}
void WriteClassDef(const OpInfo& op_info, WritableFile* cc) {
string class_def;
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
true),
" {\n");
strings::StrAppend(&class_def, GetConstructorBody(op_info));
strings::StrAppend(&class_def, "}\n\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
false));
strings::StrAppend(&class_def, "\n : ", op_info.op_name, "(");
int i = 0;
for (; i < op_info.arg_names.size(); ++i) {
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.arg_names[i]);
}
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.op_name, "::Attrs()");
strings::StrAppend(&class_def, ") {}\n\n");
}
TF_CHECK_OK(cc->Append(class_def));
}
void WriteCCOp(const OpDef& graph_op_def, const ApiDef& api_def,
const std::vector<string>& aliases, WritableFile* h,
WritableFile* cc) {
OpInfo op_info(graph_op_def, api_def, aliases);
WriteClassDecl(op_info, h);
WriteClassDef(op_info, cc);
}
// Writes the boilerplate prologue of the generated .h/.cc pair: common
// includes, namespace openings, the header guard, and the doxygen banner.
// `internal` selects the extra `namespace internal` nesting used for
// hidden ops; the computed guard is returned through *op_header_guard so
// FinishFiles can close it.
void StartFiles(bool internal, const string& dot_h_fname, WritableFile* h,
                WritableFile* cc, string* op_header_guard) {
  const string header =
      R"header(
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
)header";
  const string namespace_begin = internal ? R"namespace(
namespace tensorflow {
namespace ops {
namespace internal {
)namespace"
                                          : R"namespace(
namespace tensorflow {
namespace ops {
)namespace";
  const string op_header = GetPath(dot_h_fname);
  *op_header_guard = ToGuard(op_header);
  const string cc_header = strings::StrCat(
      R"include(
#include "tensorflow/cc/ops/const_op.h"
)include",
      "#include \"", op_header, "\"\n", namespace_begin);
  const string filename = GetFilename(dot_h_fname);
  // NOTE(review): the StrCat literals below look truncated in this copy
  // (unterminated quotes, likely lost doxygen text); restore from the
  // upstream cc_op_gen source before compiling.
  const string doxygen = strings::StrCat("
      ToTitle(filename), "\n", "
  TF_CHECK_OK(h->Append(
      strings::StrCat("
                      "#ifndef ",
                      *op_header_guard,
                      "\n"
                      "#define ",
                      *op_header_guard, "\n\n")));
  TF_CHECK_OK(h->Append(header));
  TF_CHECK_OK(h->Append(namespace_begin));
  TF_CHECK_OK(h->Append(doxygen));
  TF_CHECK_OK(cc->Append(cc_header));
}
// Writes the epilogue of the generated .h/.cc pair (namespace closers and
// the #endif matching StartFiles' header guard) and closes both files.
void FinishFiles(bool internal, WritableFile* h, WritableFile* cc,
                 const string& op_header_guard) {
  // Three closing braces for the internal variant (tensorflow/ops/internal),
  // two otherwise.
  const string footer = internal ? R"footer(}
}
}
)footer"
                                 :
                                   R"footer(
}
}
)footer";
  TF_CHECK_OK(h->Append(footer));
  // NOTE(review): the literal below looks truncated in this copy
  // (unterminated quote); restore from the upstream cc_op_gen source
  // before compiling.
  TF_CHECK_OK(
      h->Append(strings::StrCat("\n#endif ", "
  TF_CHECK_OK(cc->Append(footer));
  TF_CHECK_OK(cc->Close());
  TF_CHECK_OK(h->Close());
}
// Derives the filename for the internal (hidden-op) variant of `fname`
// by inserting "_internal" before the extension ("foo.h" ->
// "foo_internal.h"), or appending it when there is no extension.
string MakeInternal(const string& fname) {
  const auto dot_pos = fname.rfind('.');
  if (dot_pos == string::npos) return fname + "_internal";
  return fname.substr(0, dot_pos) + "_internal" + fname.substr(dot_pos);
}
}
// Entry point of the generator: writes C++ wrapper classes for every op
// in `ops` to dot_h_fname/dot_cc_fname. Ops marked HIDDEN in their ApiDef
// go to parallel *_internal files; SKIPped and already-deprecated ops are
// omitted entirely.
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
                const string& dot_h_fname, const string& dot_cc_fname) {
  Env* env = Env::Default();
  std::unique_ptr<WritableFile> h;
  std::unique_ptr<WritableFile> cc;
  TF_CHECK_OK(env->NewWritableFile(dot_h_fname, &h));
  TF_CHECK_OK(env->NewWritableFile(dot_cc_fname, &cc));
  string op_header_guard;
  StartFiles(false, dot_h_fname, h.get(), cc.get(), &op_header_guard);
  // Hidden ops are emitted into the parallel *_internal pair.
  std::unique_ptr<WritableFile> internal_h;
  std::unique_ptr<WritableFile> internal_cc;
  const string internal_dot_h_fname = MakeInternal(dot_h_fname);
  TF_CHECK_OK(env->NewWritableFile(internal_dot_h_fname, &internal_h));
  TF_CHECK_OK(env->NewWritableFile(MakeInternal(dot_cc_fname), &internal_cc));
  string internal_op_header_guard;
  StartFiles(true , internal_dot_h_fname, internal_h.get(),
             internal_cc.get(), &internal_op_header_guard);
  for (const auto& graph_op_def : ops.op()) {
    // Skip ops already deprecated at this GraphDef version.
    if (graph_op_def.has_deprecation() &&
        graph_op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
      continue;
    }
    // "Const" is special-cased elsewhere; never generated here.
    if (graph_op_def.name() == "Const") continue;
    const auto* api_def = api_def_map.GetApiDef(graph_op_def.name());
    if (api_def->visibility() == ApiDef::SKIP) continue;
    // Additional endpoints become typedef aliases of the first one.
    std::vector<string> aliases;
    for (int endpoint_i = 1; endpoint_i < api_def->endpoint_size();
         ++endpoint_i) {
      aliases.push_back(api_def->endpoint(endpoint_i).name());
    }
    const bool hidden = api_def->visibility() == ApiDef::HIDDEN;
    WriteCCOp(graph_op_def, *api_def, aliases,
              hidden ? internal_h.get() : h.get(),
              hidden ? internal_cc.get() : cc.get());
  }
  FinishFiles(false, h.get(), cc.get(), op_header_guard);
  FinishFiles(true , internal_h.get(), internal_cc.get(),
              internal_op_header_guard);
}
}
} | #include "tensorflow/cc/framework/cc_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
// Non-fatally asserts that `s` contains `expected`.
void ExpectHasSubstr(StringPiece s, StringPiece expected) {
  const bool found = absl::StrContains(s, expected);
  EXPECT_TRUE(found) << "'" << s << "' does not contain '" << expected << "'";
}
// Non-fatally asserts that `s` does NOT contain `expected`.
void ExpectDoesNotHaveSubstr(StringPiece s, StringPiece expected) {
  const bool found = absl::StrContains(s, expected);
  EXPECT_FALSE(found) << "'" << s << "' contains '" << expected << "'";
}
void ExpectSubstrOrder(const string& s, const string& before,
const string& after) {
int before_pos = s.find(before);
int after_pos = s.find(after);
ASSERT_NE(std::string::npos, before_pos);
ASSERT_NE(std::string::npos, after_pos);
EXPECT_LT(before_pos, after_pos)
<< before << " is not before " << after << " in " << s;
}
// Runs the generator over `ops` (with `api_def_map` overrides) into
// TmpDir, then reads back the public and internal headers for inspection.
// The .cc outputs are written as a side effect but not returned.
// Fix: removed the unused local `internal_cc_file_path` (computed but
// never referenced; WriteCCOps derives the internal .cc name itself).
void GenerateCcOpFiles(Env* env, const OpList& ops,
                       const ApiDefMap& api_def_map, string* h_file_text,
                       string* internal_h_file_text) {
  const string& tmpdir = testing::TmpDir();
  const auto h_file_path = io::JoinPath(tmpdir, "test.h");
  const auto cc_file_path = io::JoinPath(tmpdir, "test.cc");
  const auto internal_h_file_path = io::JoinPath(tmpdir, "test_internal.h");
  cc_op::WriteCCOps(ops, api_def_map, h_file_path, cc_file_path);
  TF_ASSERT_OK(ReadFileToString(env, h_file_path, h_file_text));
  TF_ASSERT_OK(
      ReadFileToString(env, internal_h_file_path, internal_h_file_text));
}
// An op visible in the public header moves to the *_internal header once
// its ApiDef visibility is overridden to HIDDEN.
TEST(CcOpGenTest, TestVisibilityChangedToHidden) {
  const string api_def = R"(
op {
  graph_op_name: "Foo"
  visibility: HIDDEN
}
)";
  Env* env = Env::Default();
  OpList op_defs;
  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
  ApiDefMap api_def_map(op_defs);
  string h_file_text, internal_h_file_text;
  // Baseline: without the override, Foo is generated publicly.
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectHasSubstr(h_file_text, "class Foo");
  ExpectDoesNotHaveSubstr(internal_h_file_text, "class Foo");
  // After loading the HIDDEN override, Foo is internal-only.
  TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectHasSubstr(internal_h_file_text, "class Foo");
  ExpectDoesNotHaveSubstr(h_file_text, "class Foo");
}
// An ApiDef arg_order override reorders the generated constructor's input
// parameters.
TEST(CcOpGenTest, TestArgNameChanges) {
  const string api_def = R"(
op {
  graph_op_name: "Foo"
  arg_order: "dim"
  arg_order: "images"
}
)";
  Env* env = Env::Default();
  OpList op_defs;
  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
  ApiDefMap api_def_map(op_defs);
  string cc_file_text, h_file_text;
  string internal_cc_file_text, internal_h_file_text;
  // Baseline: original OpDef order is images, then dim.
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectSubstrOrder(h_file_text, "Input images", "Input dim");
  // With the override, dim precedes images.
  TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectSubstrOrder(h_file_text, "Input dim", "Input images");
}
// Multiple ApiDef endpoints: the first endpoint names the generated class,
// later endpoints become typedef aliases of it.
TEST(CcOpGenTest, TestEndpoints) {
  const string api_def = R"(
op {
  graph_op_name: "Foo"
  endpoint {
    name: "Foo1"
  }
  endpoint {
    name: "Foo2"
  }
}
)";
  Env* env = Env::Default();
  OpList op_defs;
  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
  ApiDefMap api_def_map(op_defs);
  string cc_file_text, h_file_text;
  string internal_cc_file_text, internal_h_file_text;
  // Baseline: single implicit endpoint named after the op.
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectHasSubstr(h_file_text, "class Foo {");
  ExpectDoesNotHaveSubstr(h_file_text, "class Foo1");
  ExpectDoesNotHaveSubstr(h_file_text, "class Foo2");
  // With the override, Foo1 is the class and Foo2 aliases it.
  TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
  GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
                    &internal_h_file_text);
  ExpectHasSubstr(h_file_text, "class Foo1");
  ExpectHasSubstr(h_file_text, "typedef Foo1 Foo2");
  ExpectDoesNotHaveSubstr(h_file_text, "class Foo {");
}
}
} |
58 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_PER_IMAGE_STANDARDIZATION_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_ALGO_PER_IMAGE_STANDARDIZATION_H_
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace per_image_standardization {
const algo::Algo* Impl_PerImageStandardization();
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cmath>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace per_image_standardization {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
// Standardizes each image of a (batches, height, width, num_channels)
// float tensor independently: subtracts the per-image mean, then scales by
// a capped inverse standard deviation. input_data and output_data must
// each hold batches * height * width * num_channels floats.
inline void PerImageStandardization(dim_t batches, dim_t height, dim_t width,
                                    dim_t num_channels, const float* input_data,
                                    float* output_data) {
  const dim_t num_pixels_per_image = height * width * num_channels;
  const float inv_num_pixels_per_image = 1.0f / num_pixels_per_image;
  for (ind_t b = 0; b < batches; ++b) {
    const dim_t offset = b * num_pixels_per_image;
    const float* input_ptr = input_data + offset;
    float* output_ptr = output_data + offset;
    // Pass 1: per-image mean.
    float mean = 0.0f;
    for (ind_t i = 0; i < num_pixels_per_image; ++i) {
      mean += input_ptr[i];
    }
    mean *= inv_num_pixels_per_image;
    // Pass 2: variance, writing the centered values into the output.
    float variance = 0.0f;
    for (ind_t i = 0; i < num_pixels_per_image; ++i) {
      const float diff = input_ptr[i] - mean;
      variance += diff * diff * inv_num_pixels_per_image;
      output_ptr[i] = diff;
    }
    // Cap the inverse stddev so near-constant images don't divide by ~0.
    // NOTE(review): tf.image.per_image_standardization caps the stddev at
    // 1/sqrt(num_elements) (i.e. inverse capped at sqrt(N), not N) —
    // confirm the cap of num_pixels_per_image here is intended.
    const float inv_adjusted_stddev =
        fmin(num_pixels_per_image, 1.0f / sqrt(variance));
    // Pass 3: scale the centered values.
    for (ind_t i = 0; i < num_pixels_per_image; ++i) {
      output_ptr[i] *= inv_adjusted_stddev;
    }
  }
}
void ComputePerImageStandardization(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
PerImageStandardization(img_num_batches, img_height, img_width,
img_num_channels, img_data, output_data);
}
}
// Returns the process-wide Algo descriptor for per-image standardization.
const Algo* Impl_PerImageStandardization() {
  static const Algo kAlgo = {&ComputePerImageStandardization, nullptr};
  return &kAlgo;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace per_image_standardization {
namespace {
struct PerImageStandardizationTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
};
class PerImageStandardizationTest
: public testing::TestWithParam<PerImageStandardizationTestParams> {};
TEST_P(PerImageStandardizationTest, FloatPixelType) {
const PerImageStandardizationTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* per_image_standardization = Impl_PerImageStandardization();
per_image_standardization->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), img.Dims());
constexpr float kAbsError = 0.01f;
float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
PerImageStandardizationTests, PerImageStandardizationTest,
testing::ValuesIn({
PerImageStandardizationTestParams{
{1, 2, 2, 1},
{1, 2,
3, 4},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407}},
PerImageStandardizationTestParams{
{2, 2, 2, 1},
{1, 2,
3, 4,
1, 2,
4, 8},
{-1.3416407, -0.4472136,
0.4472136, 1.3416407,
-1.0257553, -0.65275335,
0.09325048, 1.5852581}},
PerImageStandardizationTestParams{
{2, 2, 2, 2},
{1, 2,
1, 3,
1, 4,
1, 5,
1, 2,
2, 2,
3, 2,
4, 2},
{-0.8451542, -0.16903085,
-0.8451542, 0.50709254,
-0.8451542, 1.1832159,
-0.8451542, 1.8593392,
-1.5075567, -0.30151135,
-0.30151135, -0.30151135,
0.904534, -0.30151135,
2.1105793, -0.30151135}},
}));
}
}
} |
59 | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_TPU_METADATA_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_TRANSFORMS_HOST_RUNTIME_TPU_METADATA_UTILS_H_
#include <optional>
#include "mlir/IR/Diagnostics.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
LogicalResult SetMetadataProtoFromClusterFuncOp(
tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
tensorflow::tpu::TPUCompileMetadataProto* metadata);
}
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
namespace {
constexpr char kStepMarkerLocationAttr[] = "step_marker_location";
constexpr char kUseXlaSpmdAttr[] = "use_spmd_for_xla_partitioning";
constexpr char kBadStringArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not a string";
constexpr char kBadArrayElementMsg[] =
"bad '{0}' attribute at index {1} with value '{2}': failed to parse to {3}";
constexpr char kBadArrayAttrLengthMsg[] =
"bad '{0}' attribute, expected array attribute of size {1}, got size {2}";
// Formats the standard "missing required attribute" diagnostic text.
std::string CreateMissingAttributeMsg(llvm::StringRef attribute) {
  auto msg = llvm::formatv("requires attribute '{0}'", attribute);
  return msg.str();
}
// Fills metadata.step_marker_location from the op's `step_marker_location`
// string attribute. An empty value maps to STEP_MARK_AT_ENTRY; a non-empty
// value that fails to parse as a StepMarkerLocation enum is an error, as
// is a missing attribute.
LogicalResult SetMetadataProtoStepMarkerLocation(
    tf_device::ClusterFuncOp op,
    tensorflow::tpu::TPUCompileMetadataProto* metadata) {
  auto attr = op->getAttrOfType<StringAttr>(kStepMarkerLocationAttr);
  if (!attr)
    return op.emitOpError(CreateMissingAttributeMsg(kStepMarkerLocationAttr));
  xla::DebugOptions::StepMarkerLocation location =
      xla::DebugOptions::STEP_MARK_AT_ENTRY;
  const llvm::StringRef value = attr.getValue();
  if (!value.empty() &&
      !xla::DebugOptions::StepMarkerLocation_Parse(std::string(value),
                                                   &location))
    return op.emitOpError(llvm::formatv("bad '{0}' attribute with value '{1}'",
                                        kStepMarkerLocationAttr, value));
  metadata->set_step_marker_location(location);
  return success();
}
// Decodes `attr` (expected to be a StringAttr holding an xla::OpSharding)
// into *sharding_ptr. `name` and `index` only feed the diagnostics emitted
// on failure.
LogicalResult SetOpSharding(Operation* op, Attribute attr, llvm::StringRef name,
                            int index, xla::OpSharding* sharding_ptr) {
  auto as_string = mlir::dyn_cast<StringAttr>(attr);
  if (!as_string)
    return op->emitOpError(
        llvm::formatv(kBadStringArrayElementMsg, name, index));
  if (tensorflow::DecodeShardingAttribute(as_string, *sharding_ptr)
          .failed()) {
    return op->emitOpError(llvm::formatv(kBadArrayElementMsg, name, index,
                                         as_string.getValue(),
                                         "xla::OpSharding"));
  }
  return success();
}
// Populates one metadata `args` entry per cluster-func operand: dtype,
// kind (VARIABLE for DT_RESOURCE, else PARAMETER), shape (unknown rank for
// unranked types), sharding from the input_sharding array attribute,
// replica-invariance from the per-arg mhlo.is_same_data_across_replicas
// attribute, and a bounded-dynamic flag from the dynamic-arg-index attr.
LogicalResult SetMetadataProtoArgs(
    tf_device::ClusterFuncOp op,
    tensorflow::tpu::TPUCompileMetadataProto* metadata) {
  auto input_shardings =
      op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr);
  if (!input_shardings)
    return op.emitOpError(
        CreateMissingAttributeMsg(tensorflow::kInputShardingAttr));
  // Exactly one sharding entry is required per operand.
  if (input_shardings.size() != op.getNumOperands())
    return op.emitOpError(
        llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
                      op.getNumOperands(), input_shardings.size()));
  mlir::StringAttr replication_attr_name = mlir::StringAttr::get(
      op.getContext(), "mhlo.is_same_data_across_replicas");
  // Collect operand indices flagged as bounded-dynamic (attr is optional).
  auto dynamic_arg_idx = op->getAttrOfType<ArrayAttr>(TF::kDynamicArgIndexAttr);
  llvm::SmallSet<int, 4> dynamic_arg_idx_set;
  if (dynamic_arg_idx) {
    for (auto idx : dynamic_arg_idx.getValue()) {
      dynamic_arg_idx_set.insert(mlir::dyn_cast<IntegerAttr>(idx).getInt());
    }
  }
  for (auto operand_type_and_idx : llvm::enumerate(op.getOperandTypes())) {
    Type operand_type = operand_type_and_idx.value();
    int index = operand_type_and_idx.index();
    tensorflow::tpu::TPUCompileMetadataProto::Arg* arg = metadata->add_args();
    tensorflow::DataType dtype;
    tensorflow::Status status =
        tensorflow::ConvertToDataType(operand_type, &dtype);
    if (!status.ok())
      return op.emitOpError(
          llvm::formatv("failed to determine operand type at index {0}: {1}",
                        index, status.message()));
    arg->set_dtype(dtype);
    // Resource operands represent variables; everything else is a
    // plain parameter.
    if (dtype == tensorflow::DT_RESOURCE)
      arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::VARIABLE);
    else
      arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER);
    *arg->mutable_shape() = tensorflow::TensorShapeProto();
    if (auto ranked_tensor_type =
            mlir::dyn_cast<RankedTensorType>(operand_type)) {
      tensorflow::TensorShapeProto shape_proto;
      ConvertToTensorShapeProto(ranked_tensor_type.getShape(), &shape_proto);
      *arg->mutable_shape() = std::move(shape_proto);
    } else {
      // Unranked operand: record an unknown-rank shape.
      arg->mutable_shape()->set_unknown_rank(true);
    }
    if (failed(SetOpSharding(op, input_shardings.getValue()[index],
                             tensorflow::kInputShardingAttr, index,
                             arg->mutable_sharding())))
      return failure();
    // Per-argument replication hint; absent attr means "false".
    auto attr = op.getFuncOp().getArgAttrOfType<mlir::BoolAttr>(
        index, replication_attr_name);
    arg->set_is_same_data_across_replicas(attr != nullptr && attr.getValue());
    arg->mutable_is_bounded_dynamic_dim()->Add(
        dynamic_arg_idx_set.contains(index));
  }
  return success();
}
// Populates one metadata `retvals` entry (sharding only) per op result
// from the output_sharding array attribute; the attribute length must
// match the result count.
LogicalResult SetMetadataProtoRetvals(
    tf_device::ClusterFuncOp op,
    tensorflow::tpu::TPUCompileMetadataProto* metadata) {
  auto shardings =
      op->getAttrOfType<ArrayAttr>(tensorflow::kOutputShardingAttr);
  if (!shardings)
    return op.emitOpError(
        CreateMissingAttributeMsg(tensorflow::kOutputShardingAttr));
  if (shardings.size() != op.getNumResults())
    return op.emitOpError(
        llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kOutputShardingAttr,
                      op.getNumResults(), shardings.size()));
  for (const auto& entry : llvm::enumerate(shardings)) {
    if (failed(SetOpSharding(op, entry.value(),
                             tensorflow::kOutputShardingAttr, entry.index(),
                             metadata->add_retvals()->mutable_sharding())))
      return failure();
  }
  return success();
}
}
// Builds a TPUCompileMetadataProto from a tf_device.cluster_func op:
// compile options (from the optional serialized tpu_compile_options_proto
// attr), replica/core counts, step marker location, optional device
// assignment, SPMD flag, per-operand args, and per-result retvals.
LogicalResult SetMetadataProtoFromClusterFuncOp(
    tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
    std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
    tensorflow::tpu::TPUCompileMetadataProto* metadata) {
  // The attribute, when present, holds a binary-serialized
  // TPUCompileOptions proto.
  if (auto options_attr =
          op->getAttrOfType<StringAttr>("tpu_compile_options_proto")) {
    if (!metadata->mutable_compile_options()->ParseFromArray(
            options_attr.data(), options_attr.size())) {
      return failure();
    }
  }
  metadata->set_num_replicas(num_replicas);
  metadata->set_num_cores_per_replica(num_cores_per_replica);
  if (failed(SetMetadataProtoStepMarkerLocation(op, metadata)))
    return failure();
  if (xla_device_assignment.has_value())
    *metadata->mutable_device_assignment() =
        std::move(xla_device_assignment.value());
  // use_spmd_for_xla_partitioning is mandatory on the cluster func.
  auto use_spmd_attr = op->getAttrOfType<BoolAttr>(kUseXlaSpmdAttr);
  if (!use_spmd_attr)
    return op.emitOpError(CreateMissingAttributeMsg(kUseXlaSpmdAttr));
  metadata->set_use_spmd_for_xla_partitioning(use_spmd_attr.getValue());
  if (failed(SetMetadataProtoArgs(op, metadata))) return failure();
  return SetMetadataProtoRetvals(op, metadata);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFTPU {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
// gMock matcher implementation comparing protos by serialized bytes.
// Note: serialized-byte equality is sensitive to field ordering in
// non-deterministic serializations; adequate for these small test protos.
class ProtoStringMatcher {
 public:
  explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
      : expected_(expected.SerializeAsString()) {}
  template <typename Message>
  bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
    return p.SerializeAsString() == expected_;
  }
  void DescribeTo(::std::ostream* os) const { *os << expected_; }
  void DescribeNegationTo(::std::ostream* os) const {
    *os << "not equal to expected message: " << expected_;
  }
 private:
  // Serialized form of the expected proto, captured at construction.
  const std::string expected_;
};
// Convenience factory wrapping ProtoStringMatcher as a polymorphic matcher.
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
    const tsl::protobuf::Message& x) {
  ProtoStringMatcher impl(x);
  return ::testing::MakePolymorphicMatcher(impl);
}
// Absolute path of the testdata directory holding the .mlir fixtures.
std::string TestDataPath() {
  static constexpr char kTestData[] =
      "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/";
  return tensorflow::GetDataDependencyFilepath(kTestData);
}
// Test fixture: loads an MLIR module from testdata and collects its
// tf_device.cluster_func ops for metadata extraction.
class TpuMetadataUtilsTest : public ::testing::Test {
 public:
  TpuMetadataUtilsTest() {
    // Register every dialect the fixture modules may use.
    mlir::RegisterCommonToolingDialects(registry_);
    context_.appendDialectRegistry(registry_);
    context_.loadAllAvailableDialects();
  }
  // Parses `mlir_module_filename` (relative to TestDataPath()) and returns
  // all tf_device.cluster_func ops found in it.
  absl::StatusOr<std::vector<mlir::tf_device::ClusterFuncOp>> GetClusterFuncOps(
      absl::string_view mlir_module_filename) {
    TF_RETURN_IF_ERROR(CreateMlirModule(mlir_module_filename));
    std::vector<mlir::tf_device::ClusterFuncOp> cluster_func_ops;
    mlir_module_->walk([&](mlir::tf_device::ClusterFuncOp op) {
      cluster_func_ops.push_back(op);
    });
    return cluster_func_ops;
  }
 private:
  // Parses the fixture into mlir_module_; NotFound if parsing fails.
  absl::Status CreateMlirModule(absl::string_view mlir_module_filename) {
    std::string mlir_module_path =
        absl::StrCat(TestDataPath(), mlir_module_filename);
    mlir_module_ =
        mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
    if (!mlir_module_) {
      return absl::Status(
          absl::StatusCode::kNotFound,
          absl::StrCat("Could not find MLIR module at ", mlir_module_path));
    }
    return absl::OkStatus();
  }
  DialectRegistry registry_;
  MLIRContext context_;
  OwningOpRef<mlir::ModuleOp> mlir_module_;
};
// A minimal single-device cluster yields metadata containing only the
// replica/core counts.
TEST_F(TpuMetadataUtilsTest, SingleDevice) {
  TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
                          GetClusterFuncOps("basic_cluster.mlir"));
  mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
  tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
  ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
      cluster_func_op,
      1, 1, {}, &compile_metadata)));
  tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      R"pb(
        num_replicas: 1 num_cores_per_replica: 1
      )pb",
      &expected_compile_metadata));
  EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
// An SPMD-partitioned cluster produces per-arg sharding, retval sharding,
// and use_spmd_for_xla_partitioning in the metadata.
TEST_F(TpuMetadataUtilsTest, spmd) {
  TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
                          GetClusterFuncOps("spmd.mlir"));
  mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
  tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
  ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
      cluster_func_op,
      1, 2, {}, &compile_metadata)));
  tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
  ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
      R"pb(
        args {
          dtype: DT_FLOAT
          shape { unknown_rank: true }
          kind: PARAMETER
          sharding {
            type: OTHER
            tile_assignment_dimensions: 2
            tile_assignment_dimensions: 1
            tile_assignment_devices: 0
            tile_assignment_devices: 1
          }
          is_bounded_dynamic_dim: false
        }
        retvals { sharding {} }
        num_replicas: 1
        num_cores_per_replica: 2
        use_spmd_for_xla_partitioning: true
        compile_options {}
      )pb",
      &expected_compile_metadata));
  EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
}
}
} |
60 | #ifndef TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_
#define TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
// Name of the attribute holding the XLA cluster a node is assigned to.
extern const char* const kXlaClusterAttr;
// Name of the attribute listing inputs that must be compile-time constants.
extern const char* const kXlaCompileTimeConstantInputsAttr;
// A set of Nodes with a deterministic (id-based) iteration order.
using OrderedNodeSet = std::set<Node*, NodeComparatorID>;
// Returns true if `node` forwards one of its ref-typed inputs.
bool HasForwardedRefInput(const Node& node);
// Builds a cycle-detection graph mirroring `graph` (with each Enter/Exit
// frame contracted to one node). Returns false if a cycle is detected while
// inserting edges.
absl::StatusOr<bool> CreateCycleDetectionGraph(const Graph* graph,
                                               GraphCycles* cycles);
// Returns the XLA cluster `node` belongs to, or nullopt if unclustered.
std::optional<absl::string_view> GetXlaClusterForNode(const Node& node);
// Removes the XLA-cluster attribute from a node (def).
void RemoveFromXlaCluster(NodeDef* node_def);
void RemoveFromXlaCluster(Node* node);
// Returns true if `node` has a DT_RESOURCE input or output.
bool HasResourceInputOrOutput(const Node& node);
// Effective global jit level for the graph in `options`, after flags.
OptimizerOptions::GlobalJitLevel GetGlobalJitLevelForGraph(
    const GraphOptimizationPassOptions& options);
// True if all GPU-placed ops in `g` run on exactly one GPU device.
bool IsSingleGpuGraph(const Graph& g);
// True if `n` might call a function from `flib_def`.
bool MayCallFunction(const Node& n, const FunctionLibraryDefinition* flib_def);
// True for ops that only consume shape metadata (Shape/Rank/Size).
bool IsShapeConsumerOp(const Node& node);
// Summarizes which nodes were auto-clustered (per-cluster op histograms).
XlaAutoClusteringSummary GetXlaAutoClusteringSummary(const Graph& graph);
// Returns nodes reachable from or reaching ref-variable-producing nodes.
absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables(
    const Graph& graph, FunctionLibraryRuntime* lib_runtime);
// Deterministic serialization of `graph` (control inputs canonicalized).
absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph);
// 64-bit fingerprint of the deterministic serialization of `graph`.
absl::StatusOr<uint64> FingerprintGraph(const Graph& graph);
}
#endif
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include <string>
#include <unordered_map>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/jit/flags.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/xla_config_registry.h"
namespace tensorflow {
// Attribute naming the XLA cluster a node belongs to.
const char* const kXlaClusterAttr = "_XlaCluster";
// Attribute listing which of an op's inputs must be compile-time constants.
const char* const kXlaCompileTimeConstantInputsAttr =
    "_XlaCompileTimeConstantInputs";
namespace {
// Renders a human-readable description of the cycle that adding an edge
// src->dst would create: the existing dst->src path, drawn with ASCII art.
// Returns the empty string when no such path exists.
string DescribeCycle(const GraphCycles* cycles, const Graph& graph, int src,
                     int dst) {
  const int32_t max_path_size = graph.num_node_ids() + 1;
  std::vector<int32> path(max_path_size);
  const int32_t path_size =
      cycles->FindPath(dst, src, max_path_size, path.data());
  if (path_size == 0) {
    return "";
  }

  // Maps a cycle-graph node id back to a graph node name, defensively.
  auto node_name = [&graph](int node_id) {
    if (!FastBoundsCheck(node_id, graph.num_node_ids())) {
      return string("(null)");
    }
    Node* node = graph.FindNodeId(node_id);
    return node == nullptr ? string("(null)") : node->name();
  };

  string description;
  absl::StrAppend(&description, "Edge from ", node_name(src), " to ",
                  node_name(dst), " would create a cycle.\n");
  path.resize(path_size);
  for (int32_t node_id : path) {
    string ascii_art;
    if (node_id == dst) {
      ascii_art = "+-> ";
    } else if (node_id == src) {
      ascii_art = "+-- ";
    } else {
      ascii_art = "| ";
    }
    absl::StrAppend(&description, ascii_art, node_name(node_id), "\n");
  }
  return description;
}
// True for nodes that unconditionally forward an input (and therefore forward
// a ref tensor if given one); currently only Identity.
bool AlwaysForwardsRefInput(const Node& node) { return node.IsIdentity(); }
}
bool HasForwardedRefInput(const Node& node) {
if (AlwaysForwardsRefInput(node)) {
for (const Edge* incoming_edge : node.in_edges()) {
if (incoming_edge->IsControlEdge()) {
continue;
}
Node* incoming_node = incoming_edge->src();
if (IsRefType(incoming_node->output_type(incoming_edge->src_output()))) {
VLOG(2) << "Node " << node.def().ShortDebugString() << " has ref input "
<< incoming_node->name() << " " << incoming_node->type_string();
return true;
}
}
}
return false;
}
// Mirrors `graph` into `cycles` for cycle detection. Every graph node gets a
// cycle-graph node with the same id; each control-flow frame is contracted to
// a single synthetic node so that paths through Enter/Exit are modeled without
// modeling the (cyclic) loop body edges. Returns false (not an error) if a
// cycle is found among the frame-contracted edges; returns an Internal error
// if the graph contains a cycle that does not involve control flow.
absl::StatusOr<bool> CreateCycleDetectionGraph(const Graph* graph,
                                               GraphCycles* cycles) {
  // Allocate one cycle-graph node per graph node, with matching ids.
  for (int i = 0; i < graph->num_node_ids(); ++i) {
    CHECK_EQ(i, cycles->NewNode());
  }
  std::vector<ControlFlowInfo> control_flow_info;
  TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph, &control_flow_info));
  // Lazily allocates one synthetic cycle-graph node per frame name.
  std::unordered_map<string, int> frame_nodes;
  auto GetOrAddFrameNodeId = [&frame_nodes, cycles](const string& frame_name) {
    int& frame_id = frame_nodes.emplace(frame_name, -1).first->second;
    if (frame_id < 0) {
      // -1 sentinel from emplace means this frame hasn't been seen yet.
      frame_id = cycles->NewNode();
    }
    return frame_id;
  };
  for (Edge const* edge : graph->edges()) {
    if (edge->dst()->IsEnter() || edge->src()->IsExit()) {
      // Redirect edges into Enter / out of Exit to the frame's synthetic
      // node, contracting the whole frame to one vertex.
      const char* src_type = "pre-enter";
      const char* dst_type = "post-exit";
      int src = edge->src()->id();
      int dst = edge->dst()->id();
      if (edge->dst()->IsEnter()) {
        const string& frame_name =
            control_flow_info[edge->dst()->id()].frame_name;
        dst = GetOrAddFrameNodeId(frame_name);
        dst_type = "frame";
      }
      if (edge->src()->IsExit()) {
        const string& frame_name =
            control_flow_info[edge->src()->id()].frame_name;
        src = GetOrAddFrameNodeId(frame_name);
        src_type = "frame";
      }
      if (!cycles->InsertEdge(src, dst)) {
        // Frame contraction made this edge cyclic; report "no" rather than
        // an error, since the original graph may still be valid.
        VLOG(1) << "Cycle detected when adding " << src_type << "->" << dst_type
                << " edge: " << DescribeCycle(cycles, *graph, src, dst);
        return false;
      }
      continue;
    }
    if (edge->src()->IsNextIteration()) {
      // Drop back-edges: they are cyclic by construction.
      continue;
    }
    if (!cycles->InsertEdge(edge->src()->id(), edge->dst()->id())) {
      // A cycle without control flow is a malformed graph.
      return errors::Internal(
          "Found cycle in graph without control flow operator during XLA "
          "compilation: ",
          DescribeCycle(cycles, *graph, edge->src()->id(), edge->dst()->id()));
    }
  }
  return true;
}
std::optional<absl::string_view> GetXlaClusterForNode(const Node& node) {
const AttrValue* attr_value = node.attrs().Find(kXlaClusterAttr);
if (attr_value == nullptr) {
return std::nullopt;
}
Status s = AttrValueHasType(*attr_value, "string");
if (!s.ok()) {
return std::nullopt;
}
return attr_value->s();
}
bool HasResourceInputOrOutput(const Node& node) {
return std::find(node.input_types().begin(), node.input_types().end(),
DT_RESOURCE) != node.input_types().end() ||
std::find(node.output_types().begin(), node.output_types().end(),
DT_RESOURCE) != node.output_types().end();
}
// Removes the _XlaCluster attribute from a NodeDef (no-op if absent).
void RemoveFromXlaCluster(NodeDef* node_def) {
  node_def->mutable_attr()->erase(kXlaClusterAttr);
}
// Removes the _XlaCluster attribute from a live graph Node.
void RemoveFromXlaCluster(Node* node) { node->ClearAttr(kXlaClusterAttr); }
namespace {
using XlaGlobalJitLevel = xla_config_registry::XlaGlobalJitLevel;

// Computes the effective global jit level: start from the session options
// (DEFAULT means OFF), then let the --tf_xla_auto_jit flag override the
// single-GPU and general (multi-device) levels independently.
XlaGlobalJitLevel GetXlaGlobalJitLevel(
    const OptimizerOptions::GlobalJitLevel& jit_level_in_session_opts) {
  XlaGlobalJitLevel result;
  const OptimizerOptions::GlobalJitLevel base_level =
      jit_level_in_session_opts == OptimizerOptions::DEFAULT
          ? OptimizerOptions::OFF
          : jit_level_in_session_opts;
  result.single_gpu = base_level;
  result.general = base_level;

  MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
  const auto& auto_jit = flags->xla_auto_jit_flag;
  if (auto_jit.optimization_level_single_gpu != OptimizerOptions::DEFAULT) {
    result.single_gpu = static_cast<OptimizerOptions::GlobalJitLevel>(
        auto_jit.optimization_level_single_gpu);
  }
  if (auto_jit.optimization_level_general != OptimizerOptions::DEFAULT) {
    result.general = static_cast<OptimizerOptions::GlobalJitLevel>(
        auto_jit.optimization_level_general);
  }
  return result;
}
int GetGpuNumber(const string& device_name) {
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name)) {
return -1;
}
return parsed_name.type == DEVICE_GPU ? parsed_name.id : -1;
}
}
// Returns true iff the ops of `g` are assigned to exactly one distinct GPU
// device (non-GPU placements are ignored).
bool IsSingleGpuGraph(const Graph& g) {
  int distinct_gpus = 0;
  absl::flat_hash_set<string> devices_seen;
  for (Node* n : g.op_nodes()) {
    const string& device = n->assigned_device_name();
    // Count each distinct device name at most once.
    if (!devices_seen.insert(device).second) {
      continue;
    }
    if (GetGpuNumber(device) != -1 && ++distinct_gpus > 1) {
      return false;
    }
  }
  return distinct_gpus == 1;
}
// Picks the jit level to apply to the graph in `options`: the flag-adjusted
// single-GPU level when the graph runs on exactly one GPU, otherwise the
// general level. Skips the (linear) single-GPU scan when both levels agree.
OptimizerOptions::GlobalJitLevel GetGlobalJitLevelForGraph(
    const GraphOptimizationPassOptions& options) {
  OptimizerOptions::GlobalJitLevel jit_level_in_session_opts =
      options.session_options->config.graph_options()
          .optimizer_options()
          .global_jit_level();
  XlaGlobalJitLevel xla_global_jit_level =
      GetXlaGlobalJitLevel(jit_level_in_session_opts);
  if (xla_global_jit_level.single_gpu == xla_global_jit_level.general) {
    // No need to inspect device placement when the answer is the same.
    VLOG(4) << "GetGlobalJitLevelForGraph returning "
            << xla_global_jit_level.single_gpu;
    return xla_global_jit_level.single_gpu;
  }
  OptimizerOptions::GlobalJitLevel result =
      IsSingleGpuGraph(**options.graph) ? xla_global_jit_level.single_gpu
                                        : xla_global_jit_level.general;
  VLOG(4) << "GetGlobalJitLevelForGraph returning " << result;
  return result;
}
// Returns true if `n` might invoke a function: its op type names a function
// in `flib_def`, or any of its attributes carries a function value.
bool MayCallFunction(const Node& n, const FunctionLibraryDefinition* flib_def) {
  if (flib_def->Contains(n.type_string())) {
    return true;
  }
  for (const auto& name_attr_pair : n.def().attr()) {
    if (name_attr_pair.second.has_func()) {
      return true;
    }
  }
  return false;
}
bool IsShapeConsumerOp(const Node& node) {
return node.type_string() == "Shape" || node.type_string() == "Rank" ||
node.type_string() == "Size";
}
namespace {
// Aggregate statistics for one XLA cluster.
struct ClusterInfo {
  int size;  // Number of nodes assigned to the cluster.
  // Op type name -> number of occurrences within the cluster.
  absl::flat_hash_map<absl::string_view, int> op_histogram;
};
// Copies `histogram` into the repeated proto field `result`, sorted by op
// name so the output is deterministic (flat_hash_map iteration order isn't).
void HistogramMapToRepeatedOpAndCount(
    protobuf::RepeatedPtrField<XlaAutoClusteringSummary::OpAndCount>* result,
    const absl::flat_hash_map<absl::string_view, int>& histogram) {
  for (const auto& pair : histogram) {
    XlaAutoClusteringSummary::OpAndCount* new_entry = result->Add();
    new_entry->set_op(std::string(pair.first));
    new_entry->set_count(pair.second);
  }
  absl::c_sort(*result, [](const XlaAutoClusteringSummary::OpAndCount& a,
                           const XlaAutoClusteringSummary::OpAndCount& b) {
    return a.op() < b.op();
  });
}
// Fills the Cluster proto `result` from `name` and the collected `info`.
void ClusterInfoToProtobuf(XlaAutoClusteringSummary::Cluster* result,
                           absl::string_view name, const ClusterInfo& info) {
  result->set_name(std::string(name));
  result->set_size(info.size);
  HistogramMapToRepeatedOpAndCount(result->mutable_op_histogram(),
                                   info.op_histogram);
}
}
// Builds a summary of auto-clustering results for `graph`: per-cluster sizes
// and op histograms, plus a histogram for unclustered nodes. Output is
// sorted by cluster name / op name for determinism.
XlaAutoClusteringSummary GetXlaAutoClusteringSummary(const Graph& graph) {
  absl::flat_hash_map<absl::string_view, ClusterInfo> cluster_name_to_info;
  XlaAutoClusteringSummary result;
  absl::flat_hash_map<absl::string_view, int> unclustered_op_histogram;
  for (Node* n : graph.nodes()) {
    std::optional<absl::string_view> cluster_name = GetXlaClusterForNode(*n);
    if (cluster_name) {
      result.set_clustered_node_count(result.clustered_node_count() + 1);
      ClusterInfo* info = &cluster_name_to_info[*cluster_name];
      info->size++;
      info->op_histogram[n->type_string()]++;
    } else {
      result.set_unclustered_node_count(result.unclustered_node_count() + 1);
      unclustered_op_histogram[n->type_string()]++;
    }
  }
  for (const auto& pair : cluster_name_to_info) {
    XlaAutoClusteringSummary::Cluster* new_cluster = result.add_clusters();
    ClusterInfoToProtobuf(new_cluster, pair.first, pair.second);
  }
  // Canonical ordering: clusters sorted by name.
  absl::c_sort(*result.mutable_clusters(),
               [&](const XlaAutoClusteringSummary::Cluster& a,
                   const XlaAutoClusteringSummary::Cluster& b) {
                 return a.name() < b.name();
               });
  HistogramMapToRepeatedOpAndCount(result.mutable_unclustered_op_histogram(),
                                   unclustered_op_histogram);
  return result;
}
namespace {
using CallTargetListTy = absl::InlinedVector<NameAttrList, 2>;

// Collects the functions node `n` may call: if its op type is itself a
// function, that single function (with the node's attrs); otherwise every
// function-valued attribute (including function lists) attached to the node.
CallTargetListTy GetCallTargetListFromNode(
    const Node& n, FunctionLibraryRuntime* lib_runtime) {
  const FunctionLibraryDefinition& flib_def =
      *lib_runtime->GetFunctionLibraryDefinition();
  if (flib_def.Find(n.type_string())) {
    NameAttrList callee;
    callee.set_name(n.type_string());
    *callee.mutable_attr() = n.def().attr();
    return {callee};
  }
  CallTargetListTy result;
  for (const auto& name_attr_pair : n.attrs()) {
    const AttrValue& attr_value = name_attr_pair.second;
    if (attr_value.value_case() == AttrValue::kFunc) {
      result.push_back(attr_value.func());
    } else if (attr_value.value_case() == AttrValue::kList) {
      result.insert(result.end(), attr_value.list().func().begin(),
                    attr_value.list().func().end());
    }
  }
  return result;
}
// Direction in which ref-variable reachability is propagated.
enum class Direction { kForward, kBackward };

// Forward declaration: mutually recursive with DoesAnyCalleeHaveRefNodes.
Status GetNodesRelatedToRefVariablesInDirection(
    const Graph& graph, FunctionLibraryRuntime* lib_runtime,
    Direction direction, int depth, absl::flat_hash_set<Node*>* result);
// Returns true if any function in `call_target_list` (or, transitively, a
// function it calls) contains a node related to a ref variable. Answers
// conservatively (true) when the recursion limit is hit or a callee cannot be
// instantiated.
absl::StatusOr<bool> DoesAnyCalleeHaveRefNodes(
    const CallTargetListTy& call_target_list,
    FunctionLibraryRuntime* lib_runtime, Direction direction, int depth) {
  const int kMaxDepth = 10;
  if (depth == kMaxDepth && !call_target_list.empty()) {
    // Conservative answer to bound the recursion.
    return true;
  }
  absl::flat_hash_set<Node*> callee_ref_nodes;
  for (const NameAttrList& call_target : call_target_list) {
    const OpRegistrationData* op_reg;
    if (OpRegistry::Global()->LookUp(call_target.name(), &op_reg).ok()) {
      const OpDef& op = op_reg->op_def;
      // Take ArgDef by const reference: it is a protobuf message and the
      // previous by-value parameter copied it for every output argument
      // (clang-tidy performance-unnecessary-value-param).
      if (absl::c_any_of(op.output_arg(), [](const OpDef::ArgDef& arg) {
            return arg.is_ref();
          })) {
        return true;
      }
      continue;
    }
    callee_ref_nodes.clear();
    FunctionLibraryRuntime::Handle handle;
    if (!lib_runtime
             ->Instantiate(call_target.name(), AttrSlice(&call_target.attr()),
                           &handle)
             .ok()) {
      VLOG(2) << "Could not find " << call_target.name()
              << " in the function library.";
      // Unknown callee: conservatively assume it touches ref variables.
      return true;
    }
    auto release_handle_on_return = gtl::MakeCleanup(
        [&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); });
    const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle);
    TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
        *fbody->graph, lib_runtime, direction, depth + 1, &callee_ref_nodes));
    if (!callee_ref_nodes.empty()) {
      return true;
    }
  }
  return false;
}
// Inserts into `result` every node of `graph` that (transitively) reaches a
// ref-typed tensor (kBackward) or is reached by one (kForward). Iterates the
// marking to a fixed point because cycles (loops) mean one pass is not
// necessarily enough.
Status GetNodesRelatedToRefVariablesInDirection(
    const Graph& graph, FunctionLibraryRuntime* lib_runtime,
    Direction direction, int depth, absl::flat_hash_set<Node*>* result) {
  std::vector<Node*> nodes_in_order;
  // Visit producers before consumers (forward) or vice versa (backward) so
  // that a single pass propagates as far as possible.
  if (direction == Direction::kForward) {
    GetReversePostOrder(graph, &nodes_in_order,
                        NodeComparatorName());
  } else {
    GetPostOrder(graph, &nodes_in_order,
                 NodeComparatorName());
  }
  size_t old_result_size;
  int iterations = 0;
  const int kMaxIterations = 10 * 1000;
  // Caches the (expensive, possibly recursive) callee analysis per node id.
  // NOTE(review): std::vector<bool> is a packed-bit proxy container; fine
  // here, but a std::vector<char> would avoid proxy-reference surprises.
  std::vector<bool> callee_has_ref_nodes_cache;
  callee_has_ref_nodes_cache.resize(graph.num_node_ids());
  auto does_callee_have_ref_nodes = [&](Node* n) -> absl::StatusOr<bool> {
    if (iterations == 1) {
      // First fixed-point iteration: compute and memoize.
      TF_ASSIGN_OR_RETURN(
          bool callee_has_ref_nodes,
          DoesAnyCalleeHaveRefNodes(GetCallTargetListFromNode(*n, lib_runtime),
                                    lib_runtime, direction, depth));
      callee_has_ref_nodes_cache[n->id()] = callee_has_ref_nodes;
      return callee_has_ref_nodes;
    } else {
      // Later iterations reuse the memoized answer.
      return {callee_has_ref_nodes_cache[n->id()]};
    }
  };
  do {
    TF_RET_CHECK(iterations++ < kMaxIterations) << "infinite loop?";
    old_result_size = result->size();
    for (Node* n : nodes_in_order) {
      if (n->IsSource() || n->IsSink()) {
        continue;
      }
      bool inserted_n = false;
      // Propagate the mark along incoming (forward) or outgoing (backward)
      // edges from already-marked neighbors.
      const EdgeSet& edges =
          direction == Direction::kForward ? n->in_edges() : n->out_edges();
      for (const Edge* e : edges) {
        if (result->contains(direction == Direction::kForward ? e->src()
                                                              : e->dst())) {
          result->insert(n);
          inserted_n = true;
          break;
        }
      }
      if (inserted_n) {
        continue;
      }
      // Seed: in the forward pass, nodes that produce ref tensors.
      if (direction == Direction::kForward &&
          absl::c_any_of(n->output_types(), IsRefType)) {
        result->insert(n);
        continue;
      }
      // Seed: nodes whose callees (functions) contain ref nodes.
      TF_ASSIGN_OR_RETURN(bool callee_has_ref_nodes,
                          does_callee_have_ref_nodes(n));
      if (callee_has_ref_nodes) {
        result->insert(n);
        continue;
      }
    }
    // Repeat until no new node is marked (fixed point).
  } while (result->size() != old_result_size);
  VLOG(2) << "# iterations = " << iterations;
  return absl::OkStatus();
}
// Canonicalizes each node's input list: data inputs keep their relative order
// and come first; control inputs ("^name") follow, sorted by name. This makes
// serialization order-independent for SerializeGraphDeterministic.
void SortControlInputs(GraphDef* gdef) {
  const int64_t num_nodes = gdef->node_size();
  for (int64_t i = 0; i < num_nodes; ++i) {
    NodeDef* node = gdef->mutable_node(i);
    std::stable_sort(node->mutable_input()->begin(),
                     node->mutable_input()->end(),
                     [](const string& a, const string& b) {
                       const bool a_is_control = absl::StartsWith(a, "^");
                       const bool b_is_control = absl::StartsWith(b, "^");
                       if (a_is_control != b_is_control) {
                         return !a_is_control;  // Data inputs first.
                       }
                       // Both control: by name. Both data: keep order.
                       return a_is_control && a < b;
                     });
  }
}
}
// Returns the union of nodes reachable from ref-tensor producers (forward
// pass) and nodes from which such nodes are reachable (backward pass).
absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables(
    const Graph& graph, FunctionLibraryRuntime* lib_runtime) {
  absl::flat_hash_set<Node*> result;
  TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
      graph, lib_runtime, Direction::kForward, 0, &result));
  TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection(
      graph, lib_runtime, Direction::kBackward, 0, &result));
  VLOG(1) << "GetNodesRelatedToRefVariables() found " << result.size()
          << " nodes";
  return result;
}
// Serializes `graph` to a byte string deterministically: control inputs are
// first canonicalized (see SortControlInputs), then the proto is serialized
// with deterministic map ordering.
absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph) {
  GraphDef def;
  graph.ToGraphDef(&def);
  SortControlInputs(&def);
  std::string s;
  if (!SerializeToStringDeterministic(def, &s)) {
    return errors::Internal("Failed to serialize graphdef.");
  }
  return s;
}
// 64-bit fingerprint of the deterministic serialization of `graph`; stable
// across runs for identical graphs.
absl::StatusOr<uint64> FingerprintGraph(const Graph& graph) {
  TF_ASSIGN_OR_RETURN(std::string serialized,
                      SerializeGraphDeterministic(graph));
  return Hash64(serialized.data(), serialized.size());
}
// Exposes the flag-derived jit levels through the XLA config registry.
REGISTER_XLA_CONFIG_GETTER(GetXlaGlobalJitLevel);
} | #include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "absl/algorithm/container.h"
#include "absl/strings/str_join.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
// A path through one Enter/Exit frame must make a->b uncontractable: the
// frame is collapsed to a single cycle-graph node that connects them.
TEST(CreateCycleDetectionGraph, ConnectivityThroughEnterExitRegion) {
  Scope root = Scope::NewRootScope().ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output enter =
      ops::internal::Enter(root.WithOpName("enter"), a, "only_frame");
  Output exit = ops::internal::Exit(root.WithOpName("exit"), enter);
  Output b = ops::Add(root.WithOpName("b"), a, exit);
  FixupSourceAndSinkEdges(root.graph());
  GraphCycles cycles;
  TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status());
  EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id()));
}
// Same as above, but the a->b path goes through two distinct frames; the
// contraction must still be rejected.
TEST(CreateCycleDetectionGraph, ConnectivityThroughMultipleEnterExitRegions) {
  Scope root = Scope::NewRootScope().ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output enter_0 =
      ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0");
  Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0);
  Output enter_1 =
      ops::internal::Enter(root.WithOpName("enter_1"), a, "frame_1");
  Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
  Output b = ops::Add(root.WithOpName("b"), a, exit_1);
  FixupSourceAndSinkEdges(root.graph());
  GraphCycles cycles;
  TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status());
  EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id()));
}
// Re-entering the same frame ("frame_0") after exiting it produces a cycle in
// the frame-contracted graph; CreateCycleDetectionGraph must report false.
TEST(CreateCycleDetectionGraph, ReachingEnterExit) {
  Scope root = Scope::NewRootScope().ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output enter_0 =
      ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0");
  Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0);
  Output add = ops::Add(root.WithOpName("add"), exit_0, exit_0);
  Output enter_1 =
      ops::internal::Enter(root.WithOpName("enter_1"), add, "frame_0");
  Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
  FixupSourceAndSinkEdges(root.graph());
  GraphCycles cycles;
  TF_ASSERT_OK_AND_ASSIGN(bool ok,
                          CreateCycleDetectionGraph(root.graph(), &cycles));
  EXPECT_FALSE(ok);
}
// Fully-qualified device names used by the IsSingleGpuGraph tests below.
const char* kCPU0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const char* kGPU0 = "/job:localhost/replica:0/task:0/device:GPU:0";
const char* kGPU1 = "/job:localhost/replica:0/task:0/device:GPU:1";
// All ops on one GPU -> single-GPU graph.
TEST(IsSingleGpuGraph, ReturnsTrue) {
  Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output b = ops::Add(root.WithOpName("b"), a, a);
  Output c = ops::Add(root.WithOpName("c"), b, b);
  FixupSourceAndSinkEdges(root.graph());
  EXPECT_TRUE(IsSingleGpuGraph(*root.graph()));
}
// A CPU-only graph has zero GPUs, which also fails the exactly-one test.
TEST(IsSingleGpuGraph, ReturnsFalseForCpuGraph) {
  Scope root = Scope::NewRootScope().WithAssignedDevice(kCPU0).ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output b = ops::Add(root.WithOpName("b"), a, a);
  Output c = ops::Add(root.WithOpName("c"), b, b);
  FixupSourceAndSinkEdges(root.graph());
  EXPECT_FALSE(IsSingleGpuGraph(*root.graph()));
}
// Two distinct GPU devices -> not a single-GPU graph.
TEST(IsSingleGpuGraph, ReturnsFalseForMultiGpuGraph) {
  Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError();
  Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0));
  Output b = ops::Add(root.WithOpName("b").WithAssignedDevice(kGPU1), a, a);
  Output c = ops::Add(root.WithOpName("c"), b, b);
  FixupSourceAndSinkEdges(root.graph());
  EXPECT_FALSE(IsSingleGpuGraph(*root.graph()));
}
// Test helper: converts `scope` to a graph, runs GetNodesRelatedToRefVariables
// against a (possibly caller-supplied) function library, and returns the
// matching node names sorted for stable comparison.
absl::StatusOr<std::vector<string>> GetNodesRelatedToRefVarsSorted(
    const Scope& scope, FunctionLibraryDefinition* flib_def = nullptr) {
  FunctionDefLibrary flib;
  FunctionLibraryDefinition flib_def_local(OpRegistry::Global(), flib);
  if (flib_def == nullptr) {
    // Default to an empty function library.
    flib_def = &flib_def_local;
  }
  std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
  TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
  std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
      new ProcessFunctionLibraryRuntime(
          nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION,
          flib_def, OptimizerOptions{}));
  FunctionLibraryRuntime* lib_runtime =
      pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
  TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> nodes_related_to_ref_vars,
                      GetNodesRelatedToRefVariables(*graph, lib_runtime));
  std::vector<string> names;
  absl::c_transform(nodes_related_to_ref_vars, std::back_inserter(names),
                    [](Node* n) { return n->name(); });
  absl::c_sort(names);
  return names;
}
// Builds a small subgraph around a ref variable (Variable -> Identity ->
// Negate -> Add, plus a control-dependent Const); all of these nodes should
// be reported as related to ref variables.
void CreateSubgraphTouchingRefVar(const Scope& s) {
  Output variable =
      ops::Variable(s.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT);
  Output read = ops::Identity(s.WithOpName("read_ref_var"), variable);
  Output neg = ops::Negate(s.WithOpName("negate_ref"), read);
  Output add = ops::Add(s.WithOpName("add_ref"), neg, neg);
  Output constant =
      ops::Const(s.WithOpName("constant_ref"), Input::Initializer(0.0));
  s.graph()->AddControlEdge(constant.node(), variable.node());
}
// Builds a ref-free subgraph (Const -> Negate -> Add); none of these nodes
// should be reported as related to ref variables.
void CreateSubgraphNotTouchingRefVar(const Scope& s) {
  Output constant =
      ops::Const(s.WithOpName("constant_normal"), Input::Initializer(0.0));
  Output neg = ops::Negate(s.WithOpName("negate_normal"), constant);
  Output add = ops::Add(s.WithOpName("add_normal"), neg, neg);
}
// Adds a PartitionedCall to "RefFloatFn" (which contains a ref variable) and
// a Const control-dependent on it; the call node should be marked related.
void CreateSubgraphCallingFunctionWithRefVar(const Scope& s) {
  NameAttrList ref_float_function;
  ref_float_function.set_name("RefFloatFn");
  ops::PartitionedCall call(s.WithOpName("RefFloat"), {absl::Span<Input>{}},
                            {DT_FLOAT}, ref_float_function);
  Output constant =
      ops::Const(s.WithOpName("constant_ref_pco"), Input::Initializer(0.0));
  s.graph()->AddControlEdge(call.operation.node(), constant.node());
}
// Adds a PartitionedCall to the ref-free "RegularFloatFn"; the call node
// should NOT be marked as related to ref variables.
void CreateSubgraphCallingFunctionWithoutRefVar(const Scope& s) {
  NameAttrList regular_float_function;
  regular_float_function.set_name("RegularFloatFn");
  ops::PartitionedCall call(s.WithOpName("RegularFloat"), {absl::Span<Input>{}},
                            {DT_FLOAT}, regular_float_function);
  Output constant =
      ops::Const(s.WithOpName("constant_normal_pco"), Input::Initializer(0.0));
  s.graph()->AddControlEdge(call.operation.node(), constant.node());
}
// Defines "RefFloatFn": a function whose body reads a VariableV2 (a ref
// tensor) through an Identity.
void AddRefFunctionFunctionDef(FunctionDefLibrary* fdef_lib) {
  FunctionDef make_ref_float = FunctionDefHelper::Define(
      "RefFloatFn", {}, {"r:float"}, {},
      {{{"var"},
        "VariableV2",
        {},
        {{"dtype", DT_FLOAT}, {"shape", TensorShape({})}}},
       {{"r"}, "Identity", {"var"}, {{"T", DT_FLOAT}}}});
  *fdef_lib->add_function() = make_ref_float;
}
// Defines "RegularFloatFn": a ref-free function that returns the constant 7.
void AddRegularFunctionFunctionDef(FunctionDefLibrary* fdef_lib) {
  Tensor seven(DT_FLOAT, {});
  seven.scalar<float>()() = 7;
  FunctionDef make_regular_float = FunctionDefHelper::Define(
      "RegularFloatFn", {}, {"r:float"}, {},
      {{{"r"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", seven}}}});
  *fdef_lib->add_function() = make_regular_float;
}
// End-to-end check: only the ref-touching subgraph and the call into the
// ref-containing function are reported; the ref-free subgraphs are not.
TEST(NodesRelatedToRefVariables, Basic) {
  Scope root = Scope::NewRootScope().ExitOnError();
  FunctionDefLibrary fdef_lib;
  CreateSubgraphTouchingRefVar(root);
  CreateSubgraphNotTouchingRefVar(root);
  AddRefFunctionFunctionDef(&fdef_lib);
  CreateSubgraphCallingFunctionWithRefVar(root);
  AddRegularFunctionFunctionDef(&fdef_lib);
  CreateSubgraphCallingFunctionWithoutRefVar(root);
  FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
  TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names,
                          GetNodesRelatedToRefVarsSorted(root, &flib_def));
  // Sorted names of the expected ref-related nodes.
  std::vector<string> expected({
      "RefFloat",
      "add_ref",
      "constant_ref",
      "constant_ref_pco",
      "negate_ref",
      "read_ref_var",
      "variable",
  });
  EXPECT_EQ(names, expected);
}
// Builds a minimal Enter/Merge/NextIteration loop named `loop_name` seeded by
// `init_value`, wiring the back-edge from NextIteration into Merge's second
// input.
Status MakeLoop(Scope s, Output init_value, absl::string_view loop_name) {
  s = s.NewSubScope(std::string(loop_name));
  ops::internal::Enter enter(s.WithOpName("init_value"), init_value, loop_name);
  ops::Merge merge(s.WithOpName("merge"), {init_value, init_value});
  Output next_iteration =
      ops::NextIteration(s.WithOpName("next_itr"), merge.output);
  // Replace the placeholder second Merge input with the back-edge.
  return s.graph()->UpdateEdge(next_iteration.node(), 0, merge.output.node(),
                               1);
}
// The fixed-point propagation must terminate on cyclic (loop) graphs and mark
// only the loop fed by the ref variable, not the constant-fed loop.
TEST(NodesRelatedToRefVariables, Cycles) {
  Scope root = Scope::NewRootScope().ExitOnError();
  Output variable = ops::Variable(root.WithOpName("variable"),
                                  PartialTensorShape{}, DT_FLOAT);
  TF_ASSERT_OK(
      MakeLoop(root, ops::Identity(root.WithOpName("read_ref_var"), variable),
               "ref_loop"));
  TF_ASSERT_OK(MakeLoop(
      root, ops::Const(root.WithOpName("constant"), Input::Initializer(0.0)),
      "normal_loop"));
  TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names,
                          GetNodesRelatedToRefVarsSorted(root));
  std::vector<string> expected({"read_ref_var", "ref_loop/init_value",
                                "ref_loop/merge", "ref_loop/next_itr",
                                "variable"});
  EXPECT_EQ(names, expected);
}
}
} |
61 | #ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_CUBIC_BYTES_H_
#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_CUBIC_BYTES_H_
#include <cstdint>
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
namespace test {
class CubicBytesTest;
}
// CUBIC congestion control (see RFC 8312), operating on byte counts.
class QUICHE_EXPORT CubicBytes {
 public:
  explicit CubicBytes(const QuicClock* clock);
  CubicBytes(const CubicBytes&) = delete;
  CubicBytes& operator=(const CubicBytes&) = delete;

  // Sets how many emulated TCP connections to behave like.
  void SetNumConnections(int num_connections);

  // Resets all cubic state to post-construction defaults.
  void ResetCubicState();

  // Computes the congestion window to use after a loss event.
  // Fix: the parameter is a byte count — the previous declaration used
  // QuicPacketCount, disagreeing with the definition's QuicByteCount (both
  // are uint64 typedefs, so this is source/ABI compatible).
  QuicByteCount CongestionWindowAfterPacketLoss(QuicByteCount current);

  // Computes the congestion window to use after `acked_bytes` are acked at
  // `event_time`, given the minimum observed RTT `delay_min`.
  QuicByteCount CongestionWindowAfterAck(QuicByteCount acked_bytes,
                                         QuicByteCount current,
                                         QuicTime::Delta delay_min,
                                         QuicTime event_time);

  // Restarts the cubic epoch (epoch_ is re-initialized on the next ack).
  void OnApplicationLimited();

 private:
  friend class test::CubicBytesTest;

  static const QuicTime::Delta MaxCubicTimeInterval() {
    return QuicTime::Delta::FromMilliseconds(30);
  }

  // Reno-equivalent additive increase / multiplicative decrease factors,
  // scaled for num_connections_ emulated connections.
  float Alpha() const;
  float Beta() const;
  float BetaLastMax() const;

  QuicByteCount last_max_congestion_window() const {
    return last_max_congestion_window_;
  }

  const QuicClock* clock_;  // Not owned.
  // Number of TCP connections to emulate.
  int num_connections_;
  // Time when this cycle started, after a loss event (Zero() = uninitialized).
  QuicTime epoch_;
  // Window size just before the last loss event ("W_max").
  QuicByteCount last_max_congestion_window_;
  // Bytes acked since the last window update.
  QuicByteCount acked_bytes_count_;
  // Window growth an emulated Reno connection would have achieved.
  QuicByteCount estimated_tcp_congestion_window_;
  // Origin point of the cubic function (in bytes).
  QuicByteCount origin_point_congestion_window_;
  // Time from epoch start to the origin point, in 1024ths of a second (the
  // same fixed-point unit CongestionWindowAfterAck uses for elapsed time).
  uint32_t time_to_origin_point_;
  // Last target window computed by CongestionWindowAfterAck.
  QuicByteCount last_target_congestion_window_;
};
}
#endif
#include "quiche/quic/core/congestion_control/cubic_bytes.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
// 1024*1024^3: cube factor scale for the fixed-point cubic computation
// (time is kept in 1024ths of a second; see CongestionWindowAfterAck).
const int kCubeScale = 40;
// The cubic coefficient C scaled by 1024 (C = 0.4 in RFC 8312 terms).
const int kCubeCongestionWindowScale = 410;
// Combined factor so that K = cbrt(kCubeFactor * (W_max - cwnd)) with the
// window expressed in bytes.
const uint64_t kCubeFactor =
    (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale / kDefaultTCPMSS;
// Multiplicative decrease applied to the window on loss (beta).
const float kDefaultCubicBackoffFactor = 0.7f;
// Additional back-off applied to W_max when loss occurs before reaching the
// previous W_max (fast-convergence).
const float kBetaLastMax = 0.85f;
}
// Constructs with a single emulated connection and an uninitialized epoch;
// all remaining state is set by ResetCubicState().
CubicBytes::CubicBytes(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()) {
  ResetCubicState();
}
// Changes the number of emulated connections (affects Alpha/Beta/BetaLastMax).
void CubicBytes::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}
// TCPFriendly alpha (RFC 8312 section 4.2) scaled for N emulated connections:
// alpha = 3 N^2 (1 - beta) / (1 + beta).
float CubicBytes::Alpha() const {
  const float beta = Beta();
  return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
}
// Multiplicative decrease factor scaled so that N emulated connections back
// off (on average) like one connection with kDefaultCubicBackoffFactor.
float CubicBytes::Beta() const {
  return (num_connections_ - 1 + kDefaultCubicBackoffFactor) / num_connections_;
}
// Fast-convergence back-off for W_max, scaled like Beta() above.
float CubicBytes::BetaLastMax() const {
  return (num_connections_ - 1 + kBetaLastMax) / num_connections_;
}
// Zeroes all cubic state; the next ack will start a fresh epoch.
void CubicBytes::ResetCubicState() {
  epoch_ = QuicTime::Zero();
  last_max_congestion_window_ = 0;
  acked_bytes_count_ = 0;
  estimated_tcp_congestion_window_ = 0;
  origin_point_congestion_window_ = 0;
  time_to_origin_point_ = 0;
  last_target_congestion_window_ = 0;
}
// When sending is application-limited, time should not count against the
// cubic curve; dropping the epoch restarts it on the next ack.
void CubicBytes::OnApplicationLimited() {
  epoch_ = QuicTime::Zero();
}
// Computes the new (reduced) congestion window after a loss event and records
// W_max for the next cubic epoch (with extra back-off when the loss happened
// before the previous W_max was reached — fast convergence).
QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
    QuicByteCount current_congestion_window) {
  if (current_congestion_window + kDefaultTCPMSS <
      last_max_congestion_window_) {
    // Loss before regaining the old max: back W_max off further so a
    // competing flow can claim the freed bandwidth.
    last_max_congestion_window_ =
        static_cast<QuicByteCount>(BetaLastMax() * current_congestion_window);
  } else {
    last_max_congestion_window_ = current_congestion_window;
  }
  epoch_ = QuicTime::Zero();
  // Cast to QuicByteCount (uint64) rather than int: for windows >= 2^31
  // bytes the previous static_cast<int> of the float product overflowed.
  return static_cast<QuicByteCount>(current_congestion_window * Beta());
}
// Computes the CUBIC congestion window after `acked_bytes` are acked at
// `event_time`. Returns max(cubic target, emulated-Reno window), so the
// sender is never less aggressive than standard TCP (RFC 8312's
// TCP-friendly region).
QuicByteCount CubicBytes::CongestionWindowAfterAck(
    QuicByteCount acked_bytes, QuicByteCount current_congestion_window,
    QuicTime::Delta delay_min, QuicTime event_time) {
  acked_bytes_count_ += acked_bytes;
  if (!epoch_.IsInitialized()) {
    // First ack of a new epoch (after loss/reset/app-limited).
    QUIC_DVLOG(1) << "Start of epoch";
    epoch_ = event_time;
    acked_bytes_count_ = acked_bytes;
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      // Already at/above the previous max: origin is the current window.
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      // K = cbrt(kCubeFactor * (W_max - cwnd)), in 1024ths of a second.
      time_to_origin_point_ = static_cast<uint32_t>(
          cbrt(kCubeFactor *
               (last_max_congestion_window_ - current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  }
  // Elapsed time since epoch start, plus delay_min, in 1024ths of a second
  // (<< 10 then divide by microseconds per second).
  int64_t elapsed_time =
      ((event_time + delay_min - epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;
  uint64_t offset = std::abs(time_to_origin_point_ - elapsed_time);
  // delta = C * |t - K|^3, in bytes (kCubeScale undoes the fixed-point).
  QuicByteCount delta_congestion_window = (kCubeCongestionWindowScale * offset *
                                           offset * offset * kDefaultTCPMSS) >>
                                          kCubeScale;
  const bool add_delta = elapsed_time > time_to_origin_point_;
  QUICHE_DCHECK(add_delta ||
                (origin_point_congestion_window_ > delta_congestion_window));
  QuicByteCount target_congestion_window =
      add_delta ? origin_point_congestion_window_ + delta_congestion_window
                : origin_point_congestion_window_ - delta_congestion_window;
  // Limit growth to half the bytes acked this round.
  target_congestion_window =
      std::min(target_congestion_window,
               current_congestion_window + acked_bytes_count_ / 2);
  QUICHE_DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // Track what an emulated Reno connection (increase Alpha() MSS per window)
  // would have achieved over the same acks.
  estimated_tcp_congestion_window_ += acked_bytes_count_ *
                                      (Alpha() * kDefaultTCPMSS) /
                                      estimated_tcp_congestion_window_;
  acked_bytes_count_ = 0;
  last_target_congestion_window_ = target_congestion_window;
  // TCP-friendly region: never fall below the Reno estimate.
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }
  QUIC_DVLOG(1) << "Final target congestion_window: "
                << target_congestion_window;
  return target_congestion_window;
}
} | #include "quiche/quic/core/congestion_control/cubic_bytes.h"
#include <cmath>
#include <cstdint>
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
namespace {
// Standard CUBIC multiplicative-decrease factor applied on loss.
const float kBeta = 0.7f;
// Decrease factor used when recording last_max below the previous maximum
// (fast-convergence behavior).
const float kBetaLastMax = 0.85f;
// The emulated number of TCP connections a single CUBIC flow behaves like.
const uint32_t kNumConnections = 2;
// Per-N-connection beta: average of (N-1) unaffected flows and one backed-off
// flow.
const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections;
const float kNConnectionBetaLastMax =
    (kNumConnections - 1 + kBetaLastMax) / kNumConnections;
// Reno-friendly additive-increase alpha derived from beta (see RFC 8312
// section 4.3 for the formula shape — TODO confirm against CubicBytes).
const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
                                (1 - kNConnectionBeta) / (1 + kNConnectionBeta);
}
// Test fixture for CubicBytes. Provides a mock clock, a CubicBytes instance
// driven by it, and reference formulas for the expected window values in the
// Reno-friendly and convex (cubic) regions.
class CubicBytesTest : public QuicTest {
 protected:
  CubicBytesTest()
      : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
        hundred_ms_(QuicTime::Delta::FromMilliseconds(100)),
        cubic_(&clock_) {}
  // Expected next cwnd in the Reno-emulation region: grows by
  // alpha * MSS^2 / cwnd per acked MSS.
  QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) {
    QuicByteCount reno_estimated_cwnd =
        current_cwnd +
        kDefaultTCPMSS * (kNConnectionAlpha * kDefaultTCPMSS) / current_cwnd;
    return reno_estimated_cwnd;
  }
  // Conservative bound: cwnd may grow by at most half the acked bytes
  // (one ack of kDefaultTCPMSS -> MSS/2).
  QuicByteCount ConservativeCwndInBytes(QuicByteCount current_cwnd) {
    QuicByteCount conservative_cwnd = current_cwnd + kDefaultTCPMSS / 2;
    return conservative_cwnd;
  }
  // Expected cwnd on the convex cubic curve after `elapsed_time` (plus one
  // rtt), mirroring the fixed-point math in the implementation: time scaled
  // by 2^10, cubed, times the 410/2^40 cube factor, in MSS units.
  QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd,
                                       QuicTime::Delta rtt,
                                       QuicTime::Delta elapsed_time) {
    const int64_t offset =
        ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000;
    const QuicByteCount delta_congestion_window =
        ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40);
    const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window;
    return cubic_cwnd;
  }
  // Accessors into the CubicBytes under test.
  QuicByteCount LastMaxCongestionWindow() {
    return cubic_.last_max_congestion_window();
  }
  QuicTime::Delta MaxCubicTimeInterval() {
    return cubic_.MaxCubicTimeInterval();
  }
  const QuicTime::Delta one_ms_;
  const QuicTime::Delta hundred_ms_;
  MockClock clock_;
  CubicBytes cubic_;
};
// Drives the sender through the Reno-friendly region and into the convex
// (cubic) region, asserting exact per-ack window values against the
// RenoCwndInBytes / CubicConvexCwndInBytes reference formulas.
TEST_F(CubicBytesTest, AboveOriginWithTighterBounds) {
  const QuicTime::Delta rtt_min = hundred_ms_;
  int64_t rtt_min_ms = rtt_min.ToMilliseconds();
  float rtt_min_s = rtt_min_ms / 1000.0;
  QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
  const QuicByteCount initial_cwnd = current_cwnd;
  clock_.AdvanceTime(one_ms_);
  const QuicTime initial_time = clock_.ApproximateNow();
  // First ack must follow the Reno estimate exactly.
  const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd);
  current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                                 rtt_min, initial_time);
  ASSERT_EQ(expected_first_cwnd, current_cwnd);
  // Number of RTT rounds for which the Reno estimate is expected to dominate
  // the cubic curve (derived from alpha and rtt — TODO confirm derivation
  // against the CubicBytes implementation).
  const int max_reno_rtts =
      std::sqrt(kNConnectionAlpha / (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
      2;
  for (int i = 0; i < max_reno_rtts; ++i) {
    // In Reno mode, cwnd/MSS/alpha acks should raise cwnd by roughly one MSS
    // per RTT.
    const uint64_t num_acks_this_epoch =
        current_cwnd / kDefaultTCPMSS / kNConnectionAlpha;
    const QuicByteCount initial_cwnd_this_epoch = current_cwnd;
    for (QuicPacketCount n = 0; n < num_acks_this_epoch; ++n) {
      const QuicByteCount expected_next_cwnd = RenoCwndInBytes(current_cwnd);
      current_cwnd = cubic_.CongestionWindowAfterAck(
          kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
      ASSERT_EQ(expected_next_cwnd, current_cwnd);
    }
    const QuicByteCount cwnd_change_this_epoch =
        current_cwnd - initial_cwnd_this_epoch;
    ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2);
    clock_.AdvanceTime(hundred_ms_);
  }
  // Past the Reno region: every ack must now land exactly on the cubic
  // convex curve.
  for (int i = 0; i < 54; ++i) {
    const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS;
    const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
        hundred_ms_.ToMicroseconds() / max_acks_this_epoch);
    for (QuicPacketCount n = 0; n < max_acks_this_epoch; ++n) {
      clock_.AdvanceTime(interval);
      current_cwnd = cubic_.CongestionWindowAfterAck(
          kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
      const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
          initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
      ASSERT_EQ(expected_cwnd, current_cwnd);
    }
  }
  // One more ack without advancing the clock stays on the curve.
  const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
      initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  ASSERT_EQ(expected_cwnd, current_cwnd);
}
// Older, looser variant of the above-origin test using ASSERT_NEAR bounds.
// Disabled (DISABLED_ prefix) — kept for reference; the tighter-bounds test
// above supersedes it.
TEST_F(CubicBytesTest, DISABLED_AboveOrigin) {
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
  QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  clock_.AdvanceTime(one_ms_);
  ASSERT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  current_cwnd = expected_cwnd;
  const QuicPacketCount initial_cwnd = expected_cwnd;
  // Reno-dominated region: ~1 MSS of growth per 100ms epoch.
  for (int i = 0; i < 48; ++i) {
    for (QuicPacketCount n = 1;
         n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) {
      ASSERT_NEAR(
          current_cwnd,
          cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
                                          clock_.ApproximateNow()),
          kDefaultTCPMSS);
    }
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    expected_cwnd += kDefaultTCPMSS;
    ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS);
  }
  // Cubic region: advance 52 further epochs, acking once per packet in cwnd.
  for (int i = 0; i < 52; ++i) {
    for (QuicPacketCount n = 1; n < current_cwnd / kDefaultTCPMSS; ++n) {
      ASSERT_NEAR(
          current_cwnd,
          cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
                                          clock_.ApproximateNow()),
          kDefaultTCPMSS);
    }
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  }
  // Final window (in packets) should match the cubic formula
  // C * t^3 (with C = 410/1024) after ~10.1s of elapsed time.
  float elapsed_time_s = 10.0f + 0.1f;
  expected_cwnd =
      initial_cwnd / kDefaultTCPMSS +
      (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024;
  EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS);
}
// Starts far along the cubic curve (large cwnd) and verifies that frequent,
// closely-spaced acks each produce a small, strictly-positive window
// increase that stays on the convex reference curve.
TEST_F(CubicBytesTest, AboveOriginFineGrainedCubing) {
  QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS;
  const QuicByteCount initial_cwnd = current_cwnd;
  const QuicTime::Delta rtt_min = hundred_ms_;
  clock_.AdvanceTime(one_ms_);
  QuicTime initial_time = clock_.ApproximateNow();
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600));
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  for (int i = 0; i < 100; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
    const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
        initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
    const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    ASSERT_EQ(expected_cwnd, next_cwnd);
    // Growth must be monotone but fine-grained: less than a tenth of an MSS
    // per 10ms step at this point on the curve.
    ASSERT_GT(next_cwnd, current_cwnd);
    const QuicByteCount cwnd_delta = next_cwnd - current_cwnd;
    ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta);
    current_cwnd = next_cwnd;
  }
}
// Verifies that the window can grow on every ack (not just once per epoch):
// acks spaced within MaxCubicTimeInterval should each bump cwnd along the
// Reno estimate, accumulating close to a full MSS of growth.
TEST_F(CubicBytesTest, PerAckUpdates) {
  QuicPacketCount initial_cwnd_packets = 150;
  QuicByteCount current_cwnd = initial_cwnd_packets * kDefaultTCPMSS;
  const QuicTime::Delta rtt_min = 350 * one_ms_;
  clock_.AdvanceTime(one_ms_);
  QuicByteCount reno_cwnd = RenoCwndInBytes(current_cwnd);
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  const QuicByteCount initial_cwnd = current_cwnd;
  // Spread max_acks acks evenly inside one MaxCubicTimeInterval.
  const QuicPacketCount max_acks = initial_cwnd_packets / kNConnectionAlpha;
  const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
      MaxCubicTimeInterval().ToMicroseconds() / (max_acks + 1));
  clock_.AdvanceTime(interval);
  reno_cwnd = RenoCwndInBytes(reno_cwnd);
  // The first ack within the interval does not change the window.
  ASSERT_EQ(current_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  for (QuicPacketCount i = 1; i < max_acks; ++i) {
    clock_.AdvanceTime(interval);
    const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    reno_cwnd = RenoCwndInBytes(reno_cwnd);
    // Every subsequent ack strictly grows cwnd and tracks the Reno estimate.
    ASSERT_LT(current_cwnd, next_cwnd);
    ASSERT_EQ(reno_cwnd, next_cwnd);
    current_cwnd = next_cwnd;
  }
  const QuicByteCount minimum_expected_increase = kDefaultTCPMSS * .9;
  EXPECT_LT(minimum_expected_increase + initial_cwnd, current_cwnd);
}
// Exercises CongestionWindowAfterPacketLoss: the multiplicative beta
// decrease, and the last_max_congestion_window bookkeeping including the
// fast-convergence reduction when losing below the previous maximum.
TEST_F(CubicBytesTest, LossEvents) {
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
  QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  clock_.AdvanceTime(one_ms_);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  // First loss: cwnd shrinks by beta; last_max records the pre-loss cwnd.
  QuicByteCount pre_loss_cwnd = current_cwnd;
  ASSERT_EQ(0u, LastMaxCongestionWindow());
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  ASSERT_EQ(pre_loss_cwnd, LastMaxCongestionWindow());
  current_cwnd = expected_cwnd;
  // Second loss while below the previous maximum: fast convergence records a
  // reduced last_max (pre_loss * kBetaLastMax), not the full pre-loss cwnd.
  pre_loss_cwnd = current_cwnd;
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  ASSERT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  current_cwnd = expected_cwnd;
  EXPECT_GT(pre_loss_cwnd, LastMaxCongestionWindow());
  QuicByteCount expected_last_max =
      static_cast<QuicByteCount>(pre_loss_cwnd * kNConnectionBetaLastMax);
  EXPECT_EQ(expected_last_max, LastMaxCongestionWindow());
  EXPECT_LT(expected_cwnd, LastMaxCongestionWindow());
  // Grow back toward (but not past) last_max.
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  EXPECT_GT(LastMaxCongestionWindow(), current_cwnd);
  // Loss just below last_max: last_max is simply reset to the pre-loss cwnd.
  current_cwnd = LastMaxCongestionWindow() - 1;
  pre_loss_cwnd = current_cwnd;
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  expected_last_max = pre_loss_cwnd;
  ASSERT_EQ(expected_last_max, LastMaxCongestionWindow());
}
// After a loss puts the window below the curve's origin point, growth over
// 40 RTT-length epochs should recover to a known absolute value.
TEST_F(CubicBytesTest, BelowOrigin) {
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
  QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  clock_.AdvanceTime(one_ms_);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  current_cwnd = expected_cwnd;
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  for (int i = 0; i < 40; ++i) {
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  }
  // Golden value pinned by the implementation's fixed-point arithmetic.
  expected_cwnd = 553632;
  EXPECT_EQ(expected_cwnd, current_cwnd);
}
}
} |
62 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SELECT_V2_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SELECT_V2_H_
#include <string>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
namespace tflite {
namespace gpu {
GPUOperation CreateSelectV2(const OperationDef& definition,
const SelectV2Attributes& attr = {});
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/select_v2.h"
#include <string>
#include <utility>
namespace tflite {
namespace gpu {
// Builds the kernel source for SelectV2 (dst = cond ? true_val : else_val).
// Registers the three source tensors (cond, true, else) and the destination
// tensor on `op`, then returns the generated MAIN_FUNCTION body. The
// broadcast_* attributes decide whether true/else are read per element or as
// a single scalar; scalar_cond selects a single condition read for the whole
// tensor.
//
// Fix: the generated "} else { ... }" block previously appended "}\n;" —
// the misplaced semicolon emitted a stray empty statement into the kernel
// source after the closing brace. It is now emitted as "}\n".
std::string GetSelectV2Code(const OperationDef& op_def,
                            const SelectV2Attributes& attr, GPUOperation* op) {
  op->AddSrcTensor("cond_tensor", op_def.src_tensors[0]);
  op->AddSrcTensor("true_tensor", op_def.src_tensors[1]);
  op->AddSrcTensor("else_tensor", op_def.src_tensors[2]);
  op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
  std::string c;
  c += "MAIN_FUNCTION($0) {\n";
  if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
    // With a batch axis, global id 0 linearizes (X, B).
    c += " int linear_id = GLOBAL_ID_0;\n";
    c += " int X = linear_id / args.dst_tensor.Batch();\n";
    c += " int B = linear_id % args.dst_tensor.Batch();\n";
    c += " args.cond_tensor.SetBatchRef(B);\n";
    c += " args.dst_tensor.SetBatchRef(B);\n";
    // Broadcast tensors are read from a fixed location, so they get no batch
    // reference.
    c += attr.broadcast_true ? "" : " args.true_tensor.SetBatchRef(B);\n";
    c += attr.broadcast_false ? "" : " args.else_tensor.SetBatchRef(B);\n";
  } else {
    c += " int X = GLOBAL_ID_0;\n";
  }
  c += " int Y = GLOBAL_ID_1;\n";
  c += " int Z = GLOBAL_ID_2;\n";
  // Bounds guard for the 3D dispatch grid.
  c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
       "Z >= args.dst_tensor.Slices()) { \n";
  c += " return; \n";
  c += " } \n";
  c += " FLT4 true_val, else_val;\n";
  if (!attr.broadcast_true) {
    c += " true_val = args.true_tensor.Read(X, Y, Z);\n";
  } else {
    c += " true_val = INIT_FLT4(args.true_tensor.Read(0, 0, 0, 0).x);\n";
  }
  if (!attr.broadcast_false) {
    c += " else_val = args.else_tensor.Read(X, Y, Z);\n";
  } else {
    c += " else_val = INIT_FLT4(args.else_tensor.Read(0, 0, 0, 0).x);\n";
  }
  // When both sides broadcast, per-row gathering is unconditionally used;
  // otherwise it is inferred from a slice-count mismatch with the condition.
  c += " bool should_gather_rows = \n";
  if (attr.broadcast_true && attr.broadcast_false) {
    c += " true;\n";
  } else {
    c += " args.dst_tensor.Slices() != args.cond_tensor.Slices();\n";
  }
  c += " FLT4 res;\n";
  if (attr.scalar_cond) {
    // Single condition value selects the whole tensor.
    c += " bool cond = args.cond_tensor.Read<bool>(0, 0, 0).x;\n";
    c += " res = cond ? true_val : else_val;\n";
  } else {
    c += " if (should_gather_rows) {\n";
    c += " bool cond = args.cond_tensor.Read<bool>(X, 0, 0).x;\n";
    c += " res = cond ? true_val : else_val;\n";
    c += " } else {\n";
    // Per-channel select using a bool4 condition.
    c += " bool4 cond = args.cond_tensor.Read<bool>(0, Y, Z);\n";
    c += " res = true_val;\n";
    c += " res.x = cond.x ? true_val.x : else_val.x;\n";
    c += " res.y = cond.y ? true_val.y : else_val.y;\n";
    c += " res.z = cond.z ? true_val.z : else_val.z;\n";
    c += " }\n";
  }
  c += " args.dst_tensor.Write(res, X, Y, Z);\n";
  c += "}\n";
  return c;
}
// Assembles the SelectV2 GPU operation: generates its kernel code, sets the
// work-group mapping, and exposes the broadcast flags as kernel arguments.
GPUOperation CreateSelectV2(const OperationDef& definition,
                            const SelectV2Attributes& attr) {
  GPUOperation result(definition);
  result.code_ = GetSelectV2Code(definition, attr, &result);
  result.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
  result.args_.AddInt("broadcast_true", attr.broadcast_true);
  result.args_.AddInt("broadcast_else", attr.broadcast_false);
  return result;
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/select_v2_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, SelectV2) {
auto status = SelectV2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2Batch) {
auto status = SelectV2BatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2Channels) {
auto status = SelectV2ChannelsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2ChannelsBatch) {
auto status = SelectV2ChannelsBatchTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastTrue) {
auto status = SelectV2BroadcastTrueTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastFalse) {
auto status = SelectV2BroadcastFalseTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2BroadcastBoth) {
auto status = SelectV2BroadcastBothTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, SelectV2ChannelsBroadcastFalse) {
auto status = SelectV2ChannelsBroadcastFalseTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
63 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_CBRT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_CBRT_H_
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct CbrtOp {
struct Attributes {};
};
CbrtOp Create(CbrtOp::Attributes);
absl::Status Prepare(CbrtOp& op, const Tensor& input, Tensor& output);
absl::Status Evaluate(CbrtOp& op, const Tensor& input, Tensor& output);
}
#endif
#include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
// Functor computing the elementwise cube root.
struct Cbrt {
  template <class T>
  T operator()(T v) const {
    return std::cbrt(v);
  }
};
// F16/BF16 have no std::cbrt overload: widen to float, compute, narrow back.
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
  return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
  return BF16(operator()(static_cast<float>(val)));
}
// Constructs a CbrtOp; the op has no attributes or state to initialize.
CbrtOp Create(CbrtOp::Attributes) { return CbrtOp{}; }
// Validates and prepares the cbrt op:
//  1. propagates the input shape to the output shape,
//  2. restricts element types to float or per-tensor quantized,
//  3. requires input and output baseline element types to match.
// Each check short-circuits with an error status via the macro.
absl::Status Prepare(CbrtOp& op, const Tensor& input, Tensor& output) {
  SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
      CheckCtx("cbrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSameBaselineType(CheckCtx("cbrt"), input, output));
  return absl::OkStatus();
}
// Computes output = cbrt(input) elementwise. Per-tensor quantized inputs are
// dequantized, transformed, and requantized; float inputs are transformed
// directly. The `op` parameter carries no state and is unused here.
// NOTE(review): the final error is reached only for unsupported types —
// presumably the DISPATCH_* macros return on a handled case; confirm against
// the macro definitions in dispatch.h.
absl::Status Evaluate(CbrtOp& op, const Tensor& input, Tensor& output) {
  Cbrt cbrt;
  if (input.IsPerTensorQuantized()) {
    DISPATCH_QUANTIZED(
        detail::DequantizeOpQuantizePerTensor,
        input.quantized_per_tensor_element_type().StorageType(),
        input.quantized_per_tensor_element_type().ExpressedType(), cbrt, input,
        output)
  } else if (IsFloatTensor(input)) {
    DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
                   cbrt, input, output);
  }
  return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/cbrt.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<CbrtOp> {
static std::string Get() { return "Cbrt"; }
};
namespace {
struct Cbrt {
template <class T>
T operator()(T v) const {
return std::cbrt(v);
}
} cbrt_ref;
template <>
F16 Cbrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Cbrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseOpShapePropagationTest,
CbrtOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Cbrt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<CbrtOp>, TestParamNames);
using UnsupportedTypes = WithOpTypes<
CbrtOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Cbrt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct CbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(CbrtTest, FloatTestTypes, TestParamNames);
TYPED_TEST(CbrtTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), cbrt_ref);
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedCbrtTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedCbrtTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedCbrtTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = cbrt_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(CbrtOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} |
64 | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REMOTE_DEVICE_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_REMOTE_DEVICE_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tsl {
class Env;
}
namespace tensorflow {
using Env = tsl::Env;
class DeviceAttributes;
class Device;
class WorkerCacheInterface;
typedef std::function<Status(StringPiece name, Device** device)>
LookupLocalDevice;
void AsRemoteDevices(
Env* env,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
LookupLocalDevice lookup_local_device,
std::vector<std::unique_ptr<Device>>* remote_devices);
typedef std::function<void(const Status&, std::vector<Device*>*)>
NewRemoteDevicesDone;
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
const string& worker_name, NewRemoteDevicesDone done);
std::unique_ptr<Device> NewRemoteDevice(Env* env,
DeviceAttributes device_attribute);
}
#endif
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include <stdlib.h>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
// Device stub representing a device that lives on a remote worker. It holds
// only the device attributes/name; it cannot allocate memory locally
// (GetAllocator returns nullptr) and has no local resource manager.
class RemoteDevice : public Device {
 public:
  RemoteDevice(Env* env, const DeviceAttributes& da)
      : Device(env, da),
        local_dev_name_(DeviceNameUtils::LocalName(da.name())) {}
  // No local state to synchronize.
  Status Sync() override { return absl::OkStatus(); }
  // Remote devices cannot allocate local memory.
  Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
  // Deliberately fatal: resource managers only exist on local devices.
  ResourceMgr* resource_manager() override {
    LOG(FATAL) << "Accessing the resource manager of a remote device is not "
               << "supported.";
    std::abort();
  }
  bool IsLocal() const override { return false; }
  bool IsRemoteCallAllowed() const override { return true; }

 private:
  // Device name with the job/replica/task prefix stripped.
  const string local_dev_name_;
  RemoteDevice(const RemoteDevice&) = delete;
  void operator=(const RemoteDevice&) = delete;
};
// Converts a list of DeviceAttributes into Device objects, appended to
// `remote_devices`. When `lookup_local_device` resolves an attribute's name
// to a local device, a renamed wrapper around that local device is used
// instead of a RemoteDevice stub.
void AsRemoteDevices(
    Env* env,
    const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
    LookupLocalDevice lookup_local_device,
    std::vector<std::unique_ptr<Device>>* remote_devices) {
  for (const DeviceAttributes& attributes : device_attributes) {
    Device* existing_device = nullptr;
    const bool resolved_locally =
        lookup_local_device != nullptr &&
        lookup_local_device(attributes.name(), &existing_device).ok();
    if (resolved_locally) {
      remote_devices->emplace_back(RenamedDevice::NewRenamedDevice(
          existing_device->name(), existing_device, false, false));
    } else {
      remote_devices->emplace_back(new RemoteDevice(env, attributes));
    }
  }
}
// Asynchronously queries `worker_name` for its device list (GetStatusAsync)
// and delivers the resulting RemoteDevice objects to `done`. Ownership of
// the created Device pointers passes to the `done` callback. On failure,
// `done` receives the error status and an empty vector.
//
// Fix: the rewritten-name branch previously allocated a RemoteDevice before
// applying the TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC filter, leaking
// the object whenever the filter excluded it. The allocation now happens
// only when the device is actually kept.
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
                      const string& worker_name, NewRemoteDevicesDone done) {
  WorkerInterface* wi = worker_cache->GetOrCreateWorker(worker_name);
  if (wi == nullptr) {
    std::vector<Device*> empty;
    done(errors::NotFound("Device ", worker_name, " is not found."), &empty);
    return;
  }
  // Heap-allocated request/response so they outlive this call; freed by the
  // callback's cleanup below.
  struct Call {
    GetStatusRequest req;
    GetStatusResponse resp;
  };
  Call* call = new Call;
  auto cb = [env, worker_cache, worker_name, done, wi,
             call](const Status& status) {
    Status s = status;
    std::vector<Device*> remote_devices;
    // Runs on every exit path: releases the worker, invokes `done` with the
    // final status, and frees the Call.
    auto cleanup = gtl::MakeCleanup(
        [&worker_cache, &worker_name, &wi, &done, &remote_devices, &s, call] {
          worker_cache->ReleaseWorker(worker_name, wi);
          done(s, &remote_devices);
          delete call;
        });
    if (!s.ok()) {
      return;
    }
    DeviceNameUtils::ParsedName worker_name_parsed;
    if (!DeviceNameUtils::ParseFullName(worker_name, &worker_name_parsed) ||
        !worker_name_parsed.has_job || !worker_name_parsed.has_replica ||
        !worker_name_parsed.has_task) {
      s = errors::InvalidArgument("Could not parse worker name: ", worker_name);
      LOG(WARNING) << s;
      return;
    }
    remote_devices.reserve(call->resp.device_attributes_size());
    for (const DeviceAttributes& da : call->resp.device_attributes()) {
      DeviceNameUtils::ParsedName device_name_parsed;
      CHECK(DeviceNameUtils::ParseFullName(da.name(), &device_name_parsed))
          << "Device attribute name '" << da.name() << "' could not be "
          << "parsed. Device Attribute: " << da.DebugString();
      if (device_name_parsed.job == worker_name_parsed.job &&
          device_name_parsed.replica == worker_name_parsed.replica &&
          device_name_parsed.task == worker_name_parsed.task) {
        // Device already addressed through this worker; keep as-is.
        remote_devices.push_back(new RemoteDevice(env, da));
      } else {
        // Rewrite the device name so it is addressed via this worker's
        // job/replica/task.
        DeviceAttributes da_rewritten = da;
        da_rewritten.set_name(DeviceNameUtils::FullName(
            worker_name_parsed.job, worker_name_parsed.replica,
            worker_name_parsed.task, device_name_parsed.type,
            device_name_parsed.id));
        // With TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC set, rewritten
        // TPU devices from non-"worker" jobs are excluded.
        bool include = true;
        if (getenv("TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC") !=
            nullptr) {
          include = worker_name_parsed.job == "worker" ||
                    device_name_parsed.type.find("TPU") == std::string::npos;
        }
        // Allocate only when the device will actually be kept (avoids the
        // leak the unconditional allocation caused here previously).
        if (include) {
          remote_devices.push_back(new RemoteDevice(env, da_rewritten));
        }
      }
    }
  };
  wi->GetStatusAsync(nullptr, &call->req, &call->resp,
                     false, cb);
}
// Wraps a single DeviceAttributes proto in an owned RemoteDevice instance.
std::unique_ptr<Device> NewRemoteDevice(Env* env,
                                        DeviceAttributes device_attribute) {
  std::unique_ptr<Device> device =
      std::make_unique<RemoteDevice>(env, device_attribute);
  return device;
}
} | #include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
const char* const kSession = "remote_session";
// Fixture that spins up a one-worker in-process gRPC test cluster with two
// CPU devices, then fetches that worker's device list via NewRemoteDevices.
class RemoteDeviceTest : public ::testing::Test {
 protected:
  string remote_name_;
  std::unique_ptr<WorkerCacheInterface> worker_cache_;
  WorkerInterface* wi_;
  // Devices returned by NewRemoteDevices; owned by the test (see TearDown).
  std::vector<Device*> devices_;
  std::unique_ptr<test::TestCluster> cluster_;
  std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
  RemoteDeviceTest() {
    SessionOptions options;
    // Two CPU devices on the single test worker.
    (*options.config.mutable_device_count())["CPU"] = 2;
    TF_CHECK_OK(test::TestCluster::MakeTestCluster(options, 1, &cluster_));
    const string& hostport = cluster_->targets()[0];
    GrpcChannelSpec spec;
    TF_CHECK_OK(spec.AddHostPortsJob("localhost", {hostport}));
    ChannelCreationFunction channel_func =
        ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
    std::shared_ptr<GrpcChannelCache> channel_cache(
        NewGrpcChannelCache(spec, channel_func));
    grpc_worker_env_.reset(CreateGrpcWorkerEnv());
    worker_cache_.reset(
        NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
    remote_name_ = "/job:localhost/replica:0/task:0";
    wi_ = worker_cache_->GetOrCreateWorker(remote_name_);
  }
  ~RemoteDeviceTest() override {
    worker_cache_->ReleaseWorker(remote_name_, wi_);
  }
  // Fetches the remote device list synchronously (via a Notification) and
  // sorts it by name so tests can index deterministically.
  void SetUp() override {
    Notification n;
    NewRemoteDevices(Env::Default(), worker_cache_.get(), remote_name_,
                     [&n, this](const Status& s, std::vector<Device*>* found) {
                       TF_CHECK_OK(s);
                       devices_ = *found;
                       n.Notify();
                     });
    n.WaitForNotification();
    EXPECT_EQ(devices_.size(), 2);
    std::sort(devices_.begin(), devices_.end(), [](Device* a, Device* b) {
      return a->name().compare(b->name()) < 0;
    });
  }
  void TearDown() override {
    for (auto d : devices_) delete d;
  }
};
// The two CPU devices reported by the worker carry the expected names,
// device type, and the default 256MB memory limit.
TEST_F(RemoteDeviceTest, GetStatus) {
  EXPECT_EQ(devices_[0]->name(),
            strings::StrCat(remote_name_, "/device:CPU:0"));
  EXPECT_EQ(devices_[0]->attributes().device_type(),
            DeviceType(DEVICE_CPU).type());
  EXPECT_EQ(devices_[0]->attributes().memory_limit(), 256 << 20);
  EXPECT_EQ(devices_[1]->name(),
            strings::StrCat(remote_name_, "/device:CPU:1"));
  EXPECT_EQ(devices_[1]->attributes().memory_limit(), 256 << 20);
}
} |
65 | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSPOSE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSPOSE_OP_H_
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyTransposeTo(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DimensionIndex> target_dimensions, bool domain_only);
Result<IndexTransform<>> ApplyTransposeToDynamic(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DynamicDimSpec> target_dim_specs, bool domain_only);
Result<IndexTransform<>> ApplyTranspose(
IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
bool domain_only);
// Operation type that transposes the selected dimensions to the target
// positions stored in `target_dimensions`. `Container` is a span-like type
// whose static extent (if any) fixes the selection rank at compile time.
template <typename Container>
struct TransposeToOp {
  static constexpr bool selected_dimensions_are_new = false;
  // Compile-time number of target dimensions (dynamic_extent if unknown).
  static constexpr DimensionIndex static_selection_rank =
      internal::ConstSpanType<Container>::extent;
  // Transposing never changes the input rank; just validates it can hold the
  // selection.
  constexpr static DimensionIndex GetNewStaticInputRank(
      DimensionIndex input_rank, DimensionIndex num_input_dims) {
    TENSORSTORE_CONSTEXPR_ASSERT(
        (input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
        "Number of dimensions must not exceed input rank.");
    return input_rank;
  }
  // Selection rank must agree with the number of target dimensions.
  constexpr static DimensionIndex GetStaticSelectionRank(
      DimensionIndex num_input_dims) {
    TENSORSTORE_CONSTEXPR_ASSERT(
        RankConstraint::EqualOrUnspecified(num_input_dims,
                                           static_selection_rank) &&
        "Number of selected dimensions must match number of target "
        "dimensions.");
    return num_input_dims == dynamic_rank ? static_selection_rank
                                          : num_input_dims;
  }
  Result<IndexTransform<>> Apply(IndexTransform<> transform,
                                 DimensionIndexBuffer* dimensions,
                                 bool domain_only) const {
    return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
                            domain_only);
  }
  Container target_dimensions;
};
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only);
// Operation type that transposes all dimensions; the selection must cover
// the full input rank.
struct TransposeOp {
  static constexpr bool selected_dimensions_are_new = false;
  constexpr static DimensionIndex GetNewStaticInputRank(
      DimensionIndex input_rank, DimensionIndex num_input_dims) {
    TENSORSTORE_CONSTEXPR_ASSERT(
        RankConstraint::EqualOrUnspecified(input_rank, num_input_dims) &&
        "Number of selected dimensions must equal input rank.");
    // Either side may be dynamic; prefer the statically-known one.
    return input_rank == dynamic_rank ? num_input_dims : input_rank;
  }
  constexpr static DimensionIndex GetStaticSelectionRank(
      DimensionIndex num_input_dims) {
    return num_input_dims;
  }
  Result<IndexTransform<>> Apply(IndexTransform<> transform,
                                 DimensionIndexBuffer* dimensions,
                                 bool domain_only) const {
    return ApplyTranspose(std::move(transform), dimensions, domain_only);
  }
};
// Moves the selected `dimensions` to start at (possibly negative, normalized)
// position `target`, preserving the relative order of the other dimensions.
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
                                         DimensionIndexBuffer* dimensions,
                                         DimensionIndex target,
                                         bool domain_only);
// Dim-expression operation that moves the selected dimensions to a single
// `target` start position (e.g. `MoveTo`, `MoveToFront`, `MoveToBack`).
struct MoveToOp {
  static constexpr bool selected_dimensions_are_new = false;
  // Moving dimensions never changes the input rank.
  constexpr static DimensionIndex GetNewStaticInputRank(
      DimensionIndex input_rank, DimensionIndex num_input_dims) {
    return input_rank;
  }
  constexpr static DimensionIndex GetStaticSelectionRank(
      DimensionIndex num_input_dims) {
    return num_input_dims;
  }
  Result<IndexTransform<>> Apply(IndexTransform<> transform,
                                 DimensionIndexBuffer* dimensions,
                                 bool domain_only) const {
    return ApplyMoveDimsTo(std::move(transform), dimensions, target,
                           domain_only);
  }
  // Start position for the moved block; may be negative (counted from end).
  DimensionIndex target;
};
}
}
#endif
#include "tensorstore/index_space/internal/transpose_op.h"
#include <cassert>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transpose.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
// Computes a full input-dimension permutation that places the selected
// `dimensions` contiguously starting at (normalized) `target`, keeping all
// other dimensions in their original relative order.
//
// On success, `permutation` (whose length is the input rank, mapping new
// position -> old dimension) is filled in, and `*dimensions` is updated to
// the new positions of the selected dimensions.
absl::Status MakePermutationFromMoveDimsTarget(
    DimensionIndexBuffer* dimensions, DimensionIndex target,
    span<DimensionIndex> permutation) {
  // Empty selection: identity permutation.
  if (dimensions->empty()) {
    std::iota(permutation.begin(), permutation.end(),
              static_cast<DimensionIndex>(0));
    return absl::OkStatus();
  }
  const DimensionIndex input_rank = permutation.size();
  const DimensionIndex num_dims = dimensions->size();
  // `target` may be negative; there are `input_rank - num_dims + 1` valid
  // start positions for the moved block.
  TENSORSTORE_ASSIGN_OR_RETURN(
      target, NormalizeDimensionIndex(target, input_rank - num_dims + 1));
  // -1 marks permutation slots not yet assigned.
  std::fill(permutation.begin(), permutation.end(),
            static_cast<DimensionIndex>(-1));
  DimensionSet moved_dims = false;
  // Place the selected dimensions contiguously starting at `target`, and
  // update `*dimensions` to their new positions.
  for (DimensionIndex i = 0; i < num_dims; ++i) {
    DimensionIndex& input_dim = (*dimensions)[i];
    moved_dims[input_dim] = true;
    permutation[target + i] = input_dim;
    input_dim = target + i;
  }
  // Fill the remaining slots with the unmoved dimensions, preserving their
  // original order.
  for (DimensionIndex i = 0, orig_input_dim = 0; i < input_rank; ++i) {
    if (permutation[i] != -1) continue;
    while (moved_dims[orig_input_dim]) ++orig_input_dim;
    permutation[i] = orig_input_dim++;
  }
  return absl::OkStatus();
}
}
// Implements the `MoveToOp` operation: builds the move permutation on the
// stack and applies it to the transform representation.
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
                                         DimensionIndexBuffer* dimensions,
                                         DimensionIndex target,
                                         bool domain_only) {
  const DimensionIndex input_rank = transform.input_rank();
  // Permutation buffer sized for the maximum supported rank.
  DimensionIndex permutation[kMaxRank];
  TENSORSTORE_RETURN_IF_ERROR(MakePermutationFromMoveDimsTarget(
      dimensions, target, span<DimensionIndex>(&permutation[0], input_rank)));
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
// Implements the `TransposeOp` operation: `*dimensions` itself is the
// permutation (it must select all input dimensions).  After transposing, the
// selection is reset to the identity 0..rank-1 since dimension `i` of the
// result corresponds to selection slot `i`.
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
                                        DimensionIndexBuffer* dimensions,
                                        bool domain_only) {
  if (static_cast<DimensionIndex>(dimensions->size()) !=
      transform.input_rank()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of dimensions (", dimensions->size(),
        ") must equal input_rank (", transform.input_rank(), ")."));
  }
  TransformRep::Ptr<> rep = TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)), *dimensions,
      domain_only);
  std::iota(dimensions->begin(), dimensions->end(),
            static_cast<DimensionIndex>(0));
  return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
// Moves each selected dimension to its corresponding entry in
// `target_dimensions`; unselected dimensions keep their relative order in the
// remaining slots.  Updates `*dimensions` to the normalized target positions.
Result<IndexTransform<>> ApplyTransposeTo(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    span<const DimensionIndex> target_dimensions, bool domain_only) {
  const DimensionIndex input_rank = transform.input_rank();
  if (static_cast<DimensionIndex>(dimensions->size()) !=
      target_dimensions.size()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Number of selected dimensions (", dimensions->size(),
        ") must equal number of target dimensions (", target_dimensions.size(),
        ")"));
  }
  // Bit `i` is set if existing dimension `i` is part of the selection.
  DimensionSet seen_existing_dim = false;
  // Maps new input dimension -> old input dimension; -1 means unassigned.
  DimensionIndex permutation[kMaxRank];
  std::fill_n(permutation, input_rank, -1);
  for (DimensionIndex i = 0; i < target_dimensions.size(); ++i) {
    DimensionIndex& orig_dim = (*dimensions)[i];
    TENSORSTORE_ASSIGN_OR_RETURN(
        const DimensionIndex target_dim,
        NormalizeDimensionIndex(target_dimensions[i], input_rank));
    if (permutation[target_dim] != -1) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Target dimension ", target_dim, " occurs more than once"));
    }
    seen_existing_dim[orig_dim] = true;
    permutation[target_dim] = orig_dim;
    // Record the new position of this selected dimension.
    orig_dim = target_dim;
  }
  // Assign the unselected dimensions, in order, to the remaining free slots.
  for (DimensionIndex orig_dim = 0, target_dim = 0; orig_dim < input_rank;
       ++orig_dim) {
    if (seen_existing_dim[orig_dim]) continue;
    while (permutation[target_dim] != -1) ++target_dim;
    permutation[target_dim] = orig_dim;
  }
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
// Resolves dynamic target specs to concrete dimension indices, then defers to
// `ApplyTransposeTo`.  A single plain index is treated as a "move to"
// operation rather than a one-to-one transpose.
Result<IndexTransform<>> ApplyTransposeToDynamic(
    IndexTransform<> transform, DimensionIndexBuffer* dimensions,
    span<const DynamicDimSpec> target_dim_specs, bool domain_only) {
  if (target_dim_specs.size() == 1) {
    if (auto* target = std::get_if<DimensionIndex>(&target_dim_specs.front())) {
      return ApplyMoveDimsTo(std::move(transform), dimensions, *target,
                             domain_only);
    }
  }
  DimensionIndexBuffer target_dimensions;
  const DimensionIndex input_rank = transform.input_rank();
  for (const auto& s : target_dim_specs) {
    if (auto* index = std::get_if<DimensionIndex>(&s)) {
      target_dimensions.push_back(*index);
    } else if (auto* r = std::get_if<DimRangeSpec>(&s)) {
      // Expand a range spec (e.g. `1:4`) into individual indices.
      TENSORSTORE_RETURN_IF_ERROR(
          NormalizeDimRangeSpec(*r, input_rank, &target_dimensions));
    } else {
      // The remaining variant alternative is a label, which is not a valid
      // way to name a target position.
      return absl::InvalidArgumentError(
          "Target dimensions cannot be specified by label");
    }
  }
  return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
                          domain_only);
}
// Permutes all input dimensions according to `source_dim_specs`, which (after
// normalization against the input labels) must form a complete permutation of
// 0..input_rank-1.
Result<IndexTransform<>> ApplyTranspose(
    IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
    bool domain_only) {
  DimensionIndexBuffer source_dimensions;
  source_dimensions.reserve(transform.input_rank());
  TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpecs(
      source_dim_specs, transform.input_labels(), &source_dimensions));
  if (!IsValidPermutation(source_dimensions)) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Source dimension list ", span(source_dimensions),
                            " is not a valid dimension permutation for rank ",
                            transform.input_rank()));
  }
  return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
      TransformAccess::rep_ptr<container>(std::move(transform)),
      source_dimensions, domain_only));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
// Demonstrates Transpose() selecting dimensions both by index and by label;
// both selections must produce the same permuted transform.
TEST(TransposeTest, Example) {
  const auto original_transform = IndexTransformBuilder<3, 3>()
                                      .input_origin({1, 2, 3})
                                      .input_shape({3, 4, 2})
                                      .implicit_lower_bounds({1, 0, 0})
                                      .implicit_upper_bounds({0, 1, 0})
                                      .input_labels({"x", "y", "z"})
                                      .output_identity_transform()
                                      .Finalize()
                                      .value();
  const auto expected_new_transform = IndexTransformBuilder<3, 3>()
                                          .input_origin({3, 1, 2})
                                          .input_shape({2, 3, 4})
                                          .implicit_lower_bounds({0, 1, 0})
                                          .implicit_upper_bounds({0, 0, 1})
                                          .input_labels({"z", "x", "y"})
                                          .output_single_input_dimension(0, 1)
                                          .output_single_input_dimension(1, 2)
                                          .output_single_input_dimension(2, 0)
                                          .Finalize()
                                          .value();
  const EquivalentIndices equivalent_indices = {{{2, 3, 4}, {4, 2, 3}}};
  TestDimExpression(original_transform,
                    Dims(2, 0, 1).Transpose(),
                    {0, 1, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
  TestDimExpression(original_transform,
                    Dims("z", "x", "y").Transpose(),
                    {0, 1, 2},
                    expected_new_transform,
                    expected_new_transform,
                    equivalent_indices);
}
// Transpose of a rank-4 transform with both single-input-dimension and
// index-array output maps; verifies the index array is permuted as well.
TEST(TransposeTest, Simple) {
  TestDimExpression(
      IndexTransformBuilder<4, 2>()
          .input_origin({1, 2, 3, 4})
          .input_shape({5, 6, 4, 8})
          .output_single_input_dimension(0, 1, 2, 1)
          .output_index_array(
              1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      Dims(2, 0, 1, 3).Transpose(),
      {0, 1, 2, 3},
      IndexTransformBuilder<4, 4>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 2)
          .output_single_input_dimension(2, 0)
          .output_single_input_dimension(3, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 2>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .output_single_input_dimension(0, 1, 2, 2)
          .output_index_array(
              1, 2, 3,
              MakeArray<Index>(
                  {{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      {{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
// Transpose with constant output maps: only the input domain is permuted.
TEST(TransposeTest, Constant) {
  TestDimExpression(IndexTransformBuilder<2, 2>()
                        .input_origin({1, 2})
                        .input_shape({5, 6})
                        .output_constant(0, 1)
                        .output_constant(1, 2)
                        .Finalize()
                        .value(),
                    Dims(1, 0).Transpose(),
                    {0, 1},
                    IndexTransformBuilder<2, 2>()
                        .input_origin({2, 1})
                        .input_shape({6, 5})
                        .output_single_input_dimension(0, 1)
                        .output_single_input_dimension(1, 0)
                        .Finalize()
                        .value(),
                    IndexTransformBuilder<2, 2>()
                        .input_origin({2, 1})
                        .input_shape({6, 5})
                        .output_constant(0, 1)
                        .output_constant(1, 2)
                        .Finalize()
                        .value(),
                    {});
}
// Transpose() requires the selection to cover every input dimension;
// selecting only one of two dimensions must fail.
TEST(TransposeTest, ErrorHandling) {
  // Qualified explicitly: unlike TestDimExpression, TestDimExpressionError
  // has no using-declaration at the top of this file.
  ::tensorstore::internal_index_space::TestDimExpressionError(
      IndexTransformBuilder<>(2, 2)
          .input_origin({1, 2})
          .input_shape({5, 6})
          .output_constant(0, 1)
          .output_constant(1, 2)
          .Finalize()
          .value(),
      Dims(1).Transpose(), absl::StatusCode::kInvalidArgument,
      "Number of dimensions \\(1\\) must equal input_rank \\(2\\)\\.");
}
// Same as TransposeTest.Simple but with dimension labels; verifies labels are
// permuted along with the dimensions.
TEST(TransposeTest, Labeled) {
  TestDimExpression(
      IndexTransformBuilder<4, 2>()
          .input_origin({1, 2, 3, 4})
          .input_shape({5, 6, 4, 8})
          .input_labels({"a", "b", "c", "d"})
          .output_single_input_dimension(0, 1, 2, 1)
          .output_index_array(
              1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      Dims(2, 0, 1, 3).Transpose(),
      {0, 1, 2, 3},
      IndexTransformBuilder<4, 4>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .input_labels({"c", "a", "b", "d"})
          .output_single_input_dimension(0, 1)
          .output_single_input_dimension(1, 2)
          .output_single_input_dimension(2, 0)
          .output_single_input_dimension(3, 3)
          .Finalize()
          .value(),
      IndexTransformBuilder<4, 2>()
          .input_origin({3, 1, 2, 4})
          .input_shape({4, 5, 6, 8})
          .input_labels({"c", "a", "b", "d"})
          .output_single_input_dimension(0, 1, 2, 2)
          .output_index_array(
              1, 2, 3,
              MakeArray<Index>(
                  {{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
              IndexInterval::Closed(-3, 10))
          .Finalize()
          .value(),
      {{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
} |
66 | #ifndef TENSORFLOW_TSL_LIB_RANDOM_DISTRIBUTION_SAMPLER_H_
#define TENSORFLOW_TSL_LIB_RANDOM_DISTRIBUTION_SAMPLER_H_
#include <memory>
#include <utility>
#include "absl/types/span.h"
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
// Samples from a discrete distribution in O(1) per draw using precomputed
// (probability, alternate) tables — Walker's alias method.  Construction is
// O(n) in the number of weights.  Not copyable.
class DistributionSampler {
 public:
  explicit DistributionSampler(const absl::Span<const float> weights);
  ~DistributionSampler() {}
  // Draws one index in [0, num()) distributed according to the weights:
  // pick a uniform bin, then either keep it or take its alias.
  int Sample(SimplePhilox* rand) const {
    float r = rand->RandFloat();
    int idx = rand->Uniform(num_);
    if (r < prob(idx)) return idx;
    DCHECK_NE(-1, alt(idx));
    return alt(idx);
  }
  // Number of outcomes (size of the weight vector).
  int num() const { return num_; }
 private:
  // Probability of keeping bin `idx` (vs. taking its alias).
  float prob(int idx) const {
    DCHECK_LT(idx, num_);
    return data_[idx].first;
  }
  // Alias outcome for bin `idx`; -1 until assigned by the constructor.
  int alt(int idx) const {
    DCHECK_LT(idx, num_);
    return data_[idx].second;
  }
  void set_prob(int idx, float f) {
    DCHECK_LT(idx, num_);
    data_[idx].first = f;
  }
  void set_alt(int idx, int val) {
    DCHECK_LT(idx, num_);
    data_[idx].second = val;
  }
  int num_;
  // Per-bin (keep-probability, alias-index) pairs.
  std::unique_ptr<std::pair<float, int>[]> data_;
  DistributionSampler(const DistributionSampler&) = delete;
  void operator=(const DistributionSampler&) = delete;
};
}
}
#endif
#include "tsl/lib/random/distribution_sampler.h"
#include <memory>
#include <vector>
#include "absl/types/span.h"
namespace tsl {
namespace random {
// Builds the alias tables (Vose's variant of Walker's alias method):
// normalize each weight to p_i = w_i * n / sum (mean 1), then repeatedly pair
// an under-full bin (p < 1) with an over-full one (p >= 1), letting the
// over-full bin donate the under-full bin's deficit.
DistributionSampler::DistributionSampler(
    const absl::Span<const float> weights) {
  DCHECK(!weights.empty());
  int n = weights.size();
  num_ = n;
  data_.reset(new std::pair<float, int>[n]);
  std::unique_ptr<double[]> pr(new double[n]);
  double sum = 0.0;
  // Total weight; aliases start unassigned (-1).
  for (int i = 0; i < n; i++) {
    sum += weights[i];
    set_alt(i, -1);
  }
  // Worklists of over-full (p >= 1) and under-full (p < 1) bins.
  std::vector<int> high;
  high.reserve(n);
  std::vector<int> low;
  low.reserve(n);
  for (int i = 0; i < n; i++) {
    double p = (weights[i] * n) / sum;
    pr[i] = p;
    if (p < 1.0) {
      low.push_back(i);
    } else {
      high.push_back(i);
    }
  }
  // Pair each under-full bin with an over-full donor; the donor's remaining
  // mass decides which worklist it returns to.
  while (!high.empty() && !low.empty()) {
    int l = low.back();
    low.pop_back();
    int h = high.back();
    high.pop_back();
    set_alt(l, h);
    DCHECK_GE(pr[h], 1.0);
    double remaining = pr[h] - (1.0 - pr[l]);
    pr[h] = remaining;
    if (remaining < 1.0) {
      low.push_back(h);
    } else {
      high.push_back(h);
    }
  }
  // Keep-probabilities for all bins.
  for (int i = 0; i < n; i++) {
    set_prob(i, pr[i]);
  }
  // Bins left over due to floating-point round-off are saturated: always
  // keep themselves.
  for (size_t i = 0; i < high.size(); i++) {
    int idx = high[i];
    set_prob(idx, 1.0);
    set_alt(idx, idx);
  }
  for (size_t i = 0; i < low.size(); i++) {
    int idx = low[i];
    set_prob(idx, 1.0);
    set_alt(idx, idx);
  }
}
}
} | #include "tsl/lib/random/distribution_sampler.h"
#include <string.h>
#include <memory>
#include <vector>
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
// Fixture that checks empirical sample frequencies against expected weights
// using a chi-squared statistic.
class DistributionSamplerTest : public ::testing::Test {
 protected:
  // Draws `weights.size() * trials_per_bin` samples and returns the
  // chi-squared statistic of the empirical frequencies vs. `weights`.
  float TestWeights(const std::vector<float>& weights, int trials_per_bin) {
    int iters = weights.size() * trials_per_bin;
    std::unique_ptr<float[]> counts(new float[weights.size()]);
    memset(counts.get(), 0, sizeof(float) * weights.size());
    DistributionSampler sampler(weights);
    PhiloxRandom philox(testing::RandomSeed(), 17);
    SimplePhilox random(&philox);
    for (int i = 0; i < iters; i++) {
      int r = sampler.Sample(&random);
      EXPECT_LT(r, weights.size());
      EXPECT_GE(r, 0);
      counts[r] += 1.0;
    }
    float chi2 = 0.0;
    for (size_t i = 0; i < weights.size(); i++) {
      counts[i] /= iters;
      float err = (counts[i] - weights[i]);
      chi2 += (err * err) / weights[i];
    }
    return chi2;
  }
  // Verifies sampling follows the distribution in `arr`; retries once with
  // 100x more trials before declaring failure.
  void TestDistribution(float* arr, int n) {
    std::vector<float> w;
    w.reserve(n);
    for (int i = 0; i < n; i++) {
      w.push_back(arr[i]);
    }
    float var = TestWeights(w, 1000);
    if (var < 0.001) return;
    var = TestWeights(w, 100000);
    if (var < 0.001) return;
    // Fix: the original message lacked the space before "iterations",
    // producing e.g. "in 300000iterations".
    EXPECT_TRUE(false) << "Chi2 is " << var << " in " << n * 100000
                       << " iterations";
  }
};
// Checks several small known distributions (uniform and skewed).
TEST_F(DistributionSamplerTest, KnownDistribution) {
  float kEven2[] = {0.5, 0.5};
  float kEven3[] = {0.33333333, 0.33333333, 0.33333333};
  float kEven4[] = {0.25, 0.25, 0.25, 0.25};
  float kDist1[] = {0.8, 0.15, 0.05};
  TestDistribution(kEven2, TF_ARRAYSIZE(kEven2));
  TestDistribution(kEven3, TF_ARRAYSIZE(kEven3));
  TestDistribution(kEven4, TF_ARRAYSIZE(kEven4));
  TestDistribution(kDist1, TF_ARRAYSIZE(kDist1));
}
// Benchmarks Sample() for distributions of `state.range(0)` random weights.
static void BM_DistributionSampler(::testing::benchmark::State& state) {
  const int n = state.range(0);
  PhiloxRandom philox(173, 371);
  SimplePhilox rand(&philox);
  std::vector<float> weights(n, 0);
  for (int i = 0; i < n; i++) {
    weights[i] = rand.Uniform(100);
  }
  DistributionSampler picker(weights);
  int r = 0;
  // Accumulate into `r` so the sampled values are not optimized away.
  for (auto s : state) {
    r |= picker.Sample(&rand);
  }
  CHECK_NE(r, kint32max);
}
BENCHMARK(BM_DistributionSampler)->Arg(10)->Arg(100)->Arg(1000);
}
} |
67 | #ifndef AROLLA_NAMING_POLICY_H_
#define AROLLA_NAMING_POLICY_H_
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/naming/table.h"
namespace arolla::naming {
class PolicyImpl;
// Lightweight, copyable handle to a naming policy.  The referenced
// `PolicyImpl` must outlive the handle; the implementations in this library
// are process-lifetime singletons.
class Policy {
 public:
  explicit Policy(const PolicyImpl& policy_impl) : policy_impl_(&policy_impl) {}
  // Registered name of this policy (e.g. "default").
  const std::string& Name() const;
  // Formats a column path according to this policy.
  std::string Format(const ColumnPath& path) const;
  // Formats a table path according to this policy.
  std::string Format(const TablePath& path) const;
 private:
  // Non-owning pointer to the implementation singleton.
  const PolicyImpl* policy_impl_;
};
// Factory functions for the built-in policies, paired with the names under
// which `GetPolicy` resolves them.
Policy DefaultPolicy();
constexpr absl::string_view kDefaultPolicyName = "default";
Policy DoubleUnderscorePolicy();
constexpr absl::string_view kDoubleUnderscorePolicyName = "double_underscore";
Policy SingleUnderscorePolicy();
constexpr absl::string_view kSingleUnderscorePolicyName = "single_underscore";
Policy LeafOnlyPolicy();
constexpr absl::string_view kLeafOnlyPolicyName = "leaf_only";
Policy ProtopathIdPolicy();
constexpr absl::string_view kProtopathIdPolicyName = "protopath_id";
Policy GoogleSQLPolicy();
constexpr absl::string_view kGoogleSQLPolicyName = "googlesql";
// Looks up a policy by name; InvalidArgumentError if unknown.
absl::StatusOr<Policy> GetPolicy(absl::string_view policy_name);
}
#endif
#include "arolla/naming/policy.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "arolla/naming/protopath_id.h"
#include "arolla/naming/table.h"
#include "arolla/util/indestructible.h"
namespace arolla::naming {
// Abstract base for naming-policy implementations; stores the registered
// policy name and formats table/column paths.
class PolicyImpl {
 public:
  explicit PolicyImpl(absl::string_view policy_name)
      : policy_name_(policy_name) {}
  virtual ~PolicyImpl() = default;
  virtual std::string Format(const ColumnPath& path) const = 0;
  virtual std::string Format(const TablePath& path) const = 0;
  const std::string& Name() const { return policy_name_; }
 private:
  std::string policy_name_;
};
// Policy is a thin forwarding wrapper around its PolicyImpl.
const std::string& Policy::Name() const { return policy_impl_->Name(); }
std::string Policy::Format(const ColumnPath& path) const {
  return policy_impl_->Format(path);
}
std::string Policy::Format(const TablePath& path) const {
  return policy_impl_->Format(path);
}
namespace {
// "default": uses the path's slash-separated full name unchanged.
class DefaultPolicyImpl : public PolicyImpl {
 public:
  DefaultPolicyImpl() : PolicyImpl(kDefaultPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return std::string(table_path.FullName());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return std::string(column_path.FullName());
  }
};
// "double_underscore": joins path segments with "__"; extension field names
// are lower-cased with "." replaced by "_".
class DoubleUnderscorePolicyImpl : public PolicyImpl {
 public:
  DoubleUnderscorePolicyImpl() : PolicyImpl(kDoubleUnderscorePolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Format(table_path.PathSegments());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Format(column_path.PathSegments());
  }
 private:
  // Strips the extension prefix and mangles "pkg.Msg.field" into
  // "pkg_msg_field"; non-extension names are returned unchanged.
  static std::string MangleExtensionFieldName(absl::string_view field_name) {
    if (absl::ConsumePrefix(&field_name, kExtensionFieldPrefix)) {
      return absl::StrReplaceAll(absl::AsciiStrToLower(field_name),
                                 {{".", "_"}});
    } else {
      return std::string(field_name);
    }
  }
  std::string Format(const std::vector<PathSegment>& segments) const {
    return absl::StrJoin(
        segments, "__", [](std::string* ret, const PathSegment& segment) {
          std::string field_name =
              absl::StrReplaceAll(segment.FieldName(), {{"/", "__"}});
          absl::StrAppend(ret, MangleExtensionFieldName(field_name));
        });
  }
};
// "single_underscore": drops the leading "/" of the full name and joins the
// remaining segments with "_".
class SingleUnderscorePolicyImpl : public PolicyImpl {
 public:
  SingleUnderscorePolicyImpl() : PolicyImpl(kSingleUnderscorePolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Reformat(table_path.FullName());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Reformat(column_path.FullName());
  }
 private:
  // Rewrites "/a/b/c" as "a_b_c"; the empty (root) name stays empty.
  std::string Reformat(absl::string_view name) const {
    if (name.empty()) {
      return "";
    }
    name.remove_prefix(1);
    return absl::StrReplaceAll(name, {{"/", "_"}});
  }
};
// "leaf_only": keeps only the last path segment, except that "@size" columns
// keep their full name (the leaf alone would be ambiguous).
class LeafOnlyPolicyImpl : public PolicyImpl {
 public:
  LeafOnlyPolicyImpl() : PolicyImpl(kLeafOnlyPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Reformat(table_path.FullName());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Reformat(column_path.FullName());
  }
 private:
  std::string Reformat(absl::string_view name) const {
    return std::string(absl::EndsWith(name, "@size")
                           ? name
                           : name.substr(name.find_last_of('/') + 1));
  }
};
// "protopath_id": delegates to the protopath-id formatting helpers.
class ProtopathIdPolicyImpl : public PolicyImpl {
 public:
  ProtopathIdPolicyImpl() : PolicyImpl(kProtopathIdPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return TablePathToProtopathId(table_path);
  }
  std::string Format(const ColumnPath& column_path) const override {
    return ColumnPathToProtopathId(column_path);
  }
};
// "googlesql": joins segments with "."; extension fields are wrapped in
// parentheses, GoogleSQL-style.
class GoogleSQLPolicyImpl : public PolicyImpl {
 public:
  GoogleSQLPolicyImpl() : PolicyImpl(kGoogleSQLPolicyName) {}
  std::string Format(const TablePath& table_path) const override {
    return Format(table_path.PathSegments());
  }
  std::string Format(const ColumnPath& column_path) const override {
    return Format(column_path.PathSegments());
  }
 private:
  std::string Format(const std::vector<PathSegment>& segments) const {
    return absl::StrJoin(
        segments, ".", [](std::string* ret, const PathSegment& segment) {
          absl::string_view field_name = segment.FieldName();
          if (absl::ConsumePrefix(&field_name, kExtensionFieldPrefix)) {
            absl::StrAppend(ret, "(", field_name, ")");
          } else {
            absl::StrAppend(ret, field_name);
          }
        });
  }
};
}
// Each factory returns a handle to a process-lifetime singleton;
// `Indestructible` skips destruction at shutdown.
Policy DefaultPolicy() {
  static const Indestructible<DefaultPolicyImpl> impl;
  return Policy{*impl};
}
Policy DoubleUnderscorePolicy() {
  static const Indestructible<DoubleUnderscorePolicyImpl> impl;
  return Policy{*impl};
}
Policy SingleUnderscorePolicy() {
  static const Indestructible<SingleUnderscorePolicyImpl> impl;
  return Policy{*impl};
}
Policy LeafOnlyPolicy() {
  static const Indestructible<LeafOnlyPolicyImpl> impl;
  return Policy{*impl};
}
Policy ProtopathIdPolicy() {
  static const Indestructible<ProtopathIdPolicyImpl> impl;
  return Policy{*impl};
}
Policy GoogleSQLPolicy() {
  static const Indestructible<GoogleSQLPolicyImpl> impl;
  return Policy{*impl};
}
// Resolves `policy_name` to one of the built-in policies; returns
// InvalidArgumentError for an unknown name.
absl::StatusOr<Policy> GetPolicy(absl::string_view policy_name) {
  // Registry of the built-in policy names and their factories.
  struct Registration {
    absl::string_view name;
    Policy (*factory)();
  };
  static constexpr Registration kRegistry[] = {
      {kDefaultPolicyName, &DefaultPolicy},
      {kDoubleUnderscorePolicyName, &DoubleUnderscorePolicy},
      {kSingleUnderscorePolicyName, &SingleUnderscorePolicy},
      {kLeafOnlyPolicyName, &LeafOnlyPolicy},
      {kProtopathIdPolicyName, &ProtopathIdPolicy},
      {kGoogleSQLPolicyName, &GoogleSQLPolicy},
  };
  for (const Registration& registration : kRegistry) {
    if (registration.name == policy_name) {
      return registration.factory();
    }
  }
  return absl::InvalidArgumentError(
      absl::StrFormat("undefined naming policy: %s", policy_name));
}
} | #include "arolla/naming/policy.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/naming/table.h"
#include "arolla/util/testing/status_matchers_backport.h"
using ::arolla::testing::StatusIs;
namespace arolla::naming {
namespace {
// Each policy reports its registered name.
TEST(Policy, name) {
  EXPECT_EQ(DefaultPolicy().Name(), "default");
  EXPECT_EQ(DoubleUnderscorePolicy().Name(), "double_underscore");
}
// Exercises all six policies over root, nested, sized, and extension paths.
TEST(Policy, format) {
  TablePath root;
  EXPECT_EQ(DefaultPolicy().Format(root), "");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(root), "");
  EXPECT_EQ(SingleUnderscorePolicy().Format(root), "");
  EXPECT_EQ(LeafOnlyPolicy().Format(root), "");
  EXPECT_EQ(ProtopathIdPolicy().Format(root), "");
  EXPECT_EQ(GoogleSQLPolicy().Format(root), "");
  TablePath query("query");
  EXPECT_EQ(DefaultPolicy().Format(query), "/query");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(query), "query");
  EXPECT_EQ(SingleUnderscorePolicy().Format(query), "query");
  EXPECT_EQ(LeafOnlyPolicy().Format(query), "query");
  EXPECT_EQ(ProtopathIdPolicy().Format(query), "/query");
  EXPECT_EQ(GoogleSQLPolicy().Format(query), "query");
  TablePath doc = query.Child("docs", true);
  EXPECT_EQ(DefaultPolicy().Format(doc), "/query/docs");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(doc), "query__docs");
  EXPECT_EQ(SingleUnderscorePolicy().Format(doc), "query_docs");
  EXPECT_EQ(LeafOnlyPolicy().Format(doc), "docs");
  EXPECT_EQ(ProtopathIdPolicy().Format(doc), "/query/docs[:]");
  EXPECT_EQ(GoogleSQLPolicy().Format(doc), "query.docs");
  ColumnPath quality = doc.Column("quality");
  EXPECT_EQ(DefaultPolicy().Format(quality), "/query/docs/quality");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(quality), "query__docs__quality");
  EXPECT_EQ(SingleUnderscorePolicy().Format(quality), "query_docs_quality");
  EXPECT_EQ(LeafOnlyPolicy().Format(quality), "quality");
  EXPECT_EQ(ProtopathIdPolicy().Format(quality), "/query/docs[:]/quality");
  EXPECT_EQ(GoogleSQLPolicy().Format(quality), "query.docs.quality");
  ColumnPath terms_size = doc.Size("terms");
  EXPECT_EQ(DefaultPolicy().Format(terms_size), "/query/docs/terms/@size");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(terms_size),
            "query__docs__terms__@size");
  EXPECT_EQ(SingleUnderscorePolicy().Format(terms_size),
            "query_docs_terms_@size");
  EXPECT_EQ(LeafOnlyPolicy().Format(terms_size), "/query/docs/terms/@size");
  EXPECT_EQ(ProtopathIdPolicy().Format(terms_size),
            "/query/docs[:]/terms/@size");
  EXPECT_EQ(GoogleSQLPolicy().Format(terms_size), "query.docs.terms.@size");
  TablePath ext = doc.Child(ProtoExtensionAccess("foo_pkg.Bar.baz_ext"));
  EXPECT_EQ(DefaultPolicy().Format(ext),
            "/query/docs/Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(DoubleUnderscorePolicy().Format(ext),
            "query__docs__foo_pkg_bar_baz_ext");
  EXPECT_EQ(SingleUnderscorePolicy().Format(ext),
            "query_docs_Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(LeafOnlyPolicy().Format(ext), "Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(ProtopathIdPolicy().Format(ext),
            "/query/docs[:]/Ext::foo_pkg.Bar.baz_ext");
  EXPECT_EQ(GoogleSQLPolicy().Format(ext), "query.docs.(foo_pkg.Bar.baz_ext)");
}
// GetPolicy resolves known names and rejects unknown ones.
TEST(Policy, get_policy) {
  EXPECT_EQ(GetPolicy("default").value().Name(), "default");
  EXPECT_EQ(GetPolicy("double_underscore").value().Name(), "double_underscore");
  EXPECT_EQ(GetPolicy("single_underscore").value().Name(), "single_underscore");
  EXPECT_EQ(GetPolicy("leaf_only").value().Name(), "leaf_only");
  EXPECT_THAT(GetPolicy("unknown-policy-XYZ"),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "undefined naming policy: unknown-policy-XYZ"));
}
}
} |
68 | #ifndef QUICHE_QUIC_TOOLS_QUIC_DEFAULT_CLIENT_H_
#define QUICHE_QUIC_TOOLS_QUIC_DEFAULT_CLIENT_H_
#include <cstdint>
#include <memory>
#include <string>
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/tools/quic_client_default_network_helper.h"
#include "quiche/quic/tools/quic_spdy_client_base.h"
namespace quic {
class QuicServerId;
namespace test {
class QuicDefaultClientPeer;
}
// Event-loop based QUIC client using QuicClientDefaultNetworkHelper for
// socket I/O.  The constructor overloads differ only in which of config,
// network helper, and session cache the caller supplies; defaults are filled
// in otherwise.
class QuicDefaultClient : public QuicSpdyClientBase {
 public:
  // Default config, default network helper, no session cache.
  QuicDefaultClient(QuicSocketAddress server_address,
                    const QuicServerId& server_id,
                    const ParsedQuicVersionVector& supported_versions,
                    QuicEventLoop* event_loop,
                    std::unique_ptr<ProofVerifier> proof_verifier);
  // As above, with a caller-supplied session cache.
  QuicDefaultClient(QuicSocketAddress server_address,
                    const QuicServerId& server_id,
                    const ParsedQuicVersionVector& supported_versions,
                    QuicEventLoop* event_loop,
                    std::unique_ptr<ProofVerifier> proof_verifier,
                    std::unique_ptr<SessionCache> session_cache);
  // Caller-supplied config and session cache; default network helper.
  QuicDefaultClient(QuicSocketAddress server_address,
                    const QuicServerId& server_id,
                    const ParsedQuicVersionVector& supported_versions,
                    const QuicConfig& config, QuicEventLoop* event_loop,
                    std::unique_ptr<ProofVerifier> proof_verifier,
                    std::unique_ptr<SessionCache> session_cache);
  // Caller-supplied network helper; default config, no session cache.
  QuicDefaultClient(
      QuicSocketAddress server_address, const QuicServerId& server_id,
      const ParsedQuicVersionVector& supported_versions,
      QuicEventLoop* event_loop,
      std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
      std::unique_ptr<ProofVerifier> proof_verifier);
  // Caller-supplied config and network helper; no session cache.
  QuicDefaultClient(
      QuicSocketAddress server_address, const QuicServerId& server_id,
      const ParsedQuicVersionVector& supported_versions,
      const QuicConfig& config, QuicEventLoop* event_loop,
      std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
      std::unique_ptr<ProofVerifier> proof_verifier);
  // Fully general overload; all other constructors delegate here.
  QuicDefaultClient(
      QuicSocketAddress server_address, const QuicServerId& server_id,
      const ParsedQuicVersionVector& supported_versions,
      const QuicConfig& config, QuicEventLoop* event_loop,
      std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
      std::unique_ptr<ProofVerifier> proof_verifier,
      std::unique_ptr<SessionCache> session_cache);
  QuicDefaultClient(const QuicDefaultClient&) = delete;
  QuicDefaultClient& operator=(const QuicDefaultClient&) = delete;
  ~QuicDefaultClient() override;
  std::unique_ptr<QuicSession> CreateQuicClientSession(
      const ParsedQuicVersionVector& supported_versions,
      QuicConnection* connection) override;
  // File descriptor of the most recently created socket.
  int GetLatestFD() const { return default_network_helper()->GetLatestFD(); }
  // The network helper, downcast to its concrete type.
  QuicClientDefaultNetworkHelper* default_network_helper();
  const QuicClientDefaultNetworkHelper* default_network_helper() const;
};
}
#endif
#include "quiche/quic/tools/quic_default_client.h"
#include <memory>
#include <utility>
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_default_connection_helper.h"
#include "quiche/quic/core/quic_server_id.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/tools/quic_simple_client_session.h"
namespace quic {
// The following five constructors fill in defaults (QuicConfig, a
// QuicClientDefaultNetworkHelper bound to `event_loop`, and/or a null
// session cache) and delegate to the fully general constructor below.
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions,
    QuicEventLoop* event_loop, std::unique_ptr<ProofVerifier> proof_verifier)
    : QuicDefaultClient(
          server_address, server_id, supported_versions, QuicConfig(),
          event_loop,
          std::make_unique<QuicClientDefaultNetworkHelper>(event_loop, this),
          std::move(proof_verifier), nullptr) {}
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions,
    QuicEventLoop* event_loop, std::unique_ptr<ProofVerifier> proof_verifier,
    std::unique_ptr<SessionCache> session_cache)
    : QuicDefaultClient(
          server_address, server_id, supported_versions, QuicConfig(),
          event_loop,
          std::make_unique<QuicClientDefaultNetworkHelper>(event_loop, this),
          std::move(proof_verifier), std::move(session_cache)) {}
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions, const QuicConfig& config,
    QuicEventLoop* event_loop, std::unique_ptr<ProofVerifier> proof_verifier,
    std::unique_ptr<SessionCache> session_cache)
    : QuicDefaultClient(
          server_address, server_id, supported_versions, config, event_loop,
          std::make_unique<QuicClientDefaultNetworkHelper>(event_loop, this),
          std::move(proof_verifier), std::move(session_cache)) {}
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions,
    QuicEventLoop* event_loop,
    std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
    std::unique_ptr<ProofVerifier> proof_verifier)
    : QuicDefaultClient(server_address, server_id, supported_versions,
                        QuicConfig(), event_loop, std::move(network_helper),
                        std::move(proof_verifier), nullptr) {}
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions, const QuicConfig& config,
    QuicEventLoop* event_loop,
    std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
    std::unique_ptr<ProofVerifier> proof_verifier)
    : QuicDefaultClient(server_address, server_id, supported_versions, config,
                        event_loop, std::move(network_helper),
                        std::move(proof_verifier), nullptr) {}
// Terminal constructor: every other overload delegates here.
QuicDefaultClient::QuicDefaultClient(
    QuicSocketAddress server_address, const QuicServerId& server_id,
    const ParsedQuicVersionVector& supported_versions, const QuicConfig& config,
    QuicEventLoop* event_loop,
    std::unique_ptr<QuicClientDefaultNetworkHelper> network_helper,
    std::unique_ptr<ProofVerifier> proof_verifier,
    std::unique_ptr<SessionCache> session_cache)
    // The connection helper and alarm factory are passed as raw owning
    // pointers (note `.release()`); QuicSpdyClientBase presumably assumes
    // ownership of both -- TODO confirm against the base class contract.
    : QuicSpdyClientBase(server_id, supported_versions, config,
                         new QuicDefaultConnectionHelper(),
                         event_loop->CreateAlarmFactory().release(),
                         std::move(network_helper), std::move(proof_verifier),
                         std::move(session_cache)) {
  set_server_address(server_address);
}
QuicDefaultClient::~QuicDefaultClient() = default;
// Creates the client session used for a new connection; wires through the
// client's config, crypto config, and response-body/WebTransport settings.
std::unique_ptr<QuicSession> QuicDefaultClient::CreateQuicClientSession(
    const ParsedQuicVersionVector& supported_versions,
    QuicConnection* connection) {
  return std::make_unique<QuicSimpleClientSession>(
      *config(), supported_versions, connection, this, network_helper(),
      server_id(), crypto_config(), drop_response_body(),
      enable_web_transport());
}
// Downcast accessors: the helper is always a QuicClientDefaultNetworkHelper
// because every constructor above installs one.
QuicClientDefaultNetworkHelper* QuicDefaultClient::default_network_helper() {
  return static_cast<QuicClientDefaultNetworkHelper*>(network_helper());
}
const QuicClientDefaultNetworkHelper*
QuicDefaultClient::default_network_helper() const {
  return static_cast<const QuicClientDefaultNetworkHelper*>(network_helper());
}
} | #if defined(__linux__)
#include "quiche/quic/tools/quic_default_client.h"
#include <dirent.h>
#include <sys/types.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/io/quic_default_event_loop.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace test {
namespace {
const char* kPathToFds = "/proc/self/fd";
// Resolves the symlink at `path`, returning its target, or "" if the link
// vanished between directory listing and this read (ENOENT).
std::string ReadLink(const std::string& path) {
  // readlink() does not NUL-terminate, so size the buffer up front and trim
  // it to the reported byte count afterwards.
  std::string target(PATH_MAX, '\0');
  const ssize_t bytes = readlink(path.c_str(), &target[0], target.size());
  if (bytes < 0 && errno == ENOENT) {
    return "";
  }
  QUICHE_CHECK(bytes > 0 && static_cast<size_t>(bytes) < target.size())
      << "result_size:" << bytes << ", errno:" << errno
      << ", path:" << path;
  target.resize(bytes);
  return target;
}
size_t NumOpenSocketFDs() {
size_t socket_count = 0;
dirent* file;
std::unique_ptr<DIR, int (*)(DIR*)> fd_directory(opendir(kPathToFds),
closedir);
while ((file = readdir(fd_directory.get())) != nullptr) {
absl::string_view name(file->d_name);
if (name == "." || name == "..") {
continue;
}
std::string fd_path = ReadLink(absl::StrCat(kPathToFds, "/", name));
if (absl::StartsWith(fd_path, "socket:")) {
socket_count++;
}
}
return socket_count;
}
class QuicDefaultClientTest : public QuicTest {
 public:
  QuicDefaultClientTest()
      : event_loop_(GetDefaultEventLoop()->Create(QuicDefaultClock::Get())) {
    // Deliberately create and immediately destroy one client: first-time
    // initialization may open persistent (non-client) sockets, so doing it
    // here keeps the per-client FD counts in the tests deterministic.
    // NOTE(review): the warm-up rationale is inferred -- confirm.
    CreateAndInitializeQuicClient();
  }
  // Builds a client targeting a loopback address and verifies Initialize()
  // succeeds before handing it to the caller.
  std::unique_ptr<QuicDefaultClient> CreateAndInitializeQuicClient() {
    QuicSocketAddress server_address(QuicSocketAddress(TestLoopback(), 0));
    QuicServerId server_id("hostname", server_address.port(), false);
    ParsedQuicVersionVector versions = AllSupportedVersions();
    auto client = std::make_unique<QuicDefaultClient>(
        server_address, server_id, versions, event_loop_.get(),
        crypto_test_utils::ProofVerifierForTesting());
    EXPECT_TRUE(client->Initialize());
    return client;
  }
 private:
  std::unique_ptr<QuicEventLoop> event_loop_;
};
// Each client should hold exactly one socket FD while alive and release it
// on destruction; repeated create/destroy cycles must not accumulate FDs.
TEST_F(QuicDefaultClientTest, DoNotLeakSocketFDs) {
  size_t number_of_open_fds = NumOpenSocketFDs();
  const int kNumClients = 50;
  for (int i = 0; i < kNumClients; ++i) {
    EXPECT_EQ(number_of_open_fds, NumOpenSocketFDs());
    std::unique_ptr<QuicDefaultClient> client(CreateAndInitializeQuicClient());
    EXPECT_EQ(number_of_open_fds + 1, NumOpenSocketFDs());
  }
  EXPECT_EQ(number_of_open_fds, NumOpenSocketFDs());
}
// Extra UDP sockets created through the network helper each add one FD, and
// CleanUpUDPSocket() on the most recent FD removes exactly one.
TEST_F(QuicDefaultClientTest, CreateAndCleanUpUDPSockets) {
  size_t number_of_open_fds = NumOpenSocketFDs();
  std::unique_ptr<QuicDefaultClient> client(CreateAndInitializeQuicClient());
  EXPECT_EQ(number_of_open_fds + 1, NumOpenSocketFDs());
  EXPECT_TRUE(client->default_network_helper()->CreateUDPSocketAndBind(
      client->server_address(), client->bind_to_address(),
      client->local_port()));
  EXPECT_EQ(number_of_open_fds + 2, NumOpenSocketFDs());
  EXPECT_TRUE(client->default_network_helper()->CreateUDPSocketAndBind(
      client->server_address(), client->bind_to_address(),
      client->local_port()));
  EXPECT_EQ(number_of_open_fds + 3, NumOpenSocketFDs());
  client->default_network_helper()->CleanUpUDPSocket(client->GetLatestFD());
  EXPECT_EQ(number_of_open_fds + 2, NumOpenSocketFDs());
  client->default_network_helper()->CleanUpUDPSocket(client->GetLatestFD());
  EXPECT_EQ(number_of_open_fds + 1, NumOpenSocketFDs());
}
}
}
}
#endif |
69 | #ifndef XLA_SERVICE_HOST_MEMORY_TRANSFER_ASYNCIFIER_H_
#define XLA_SERVICE_HOST_MEMORY_TRANSFER_ASYNCIFIER_H_
#include <cstdint>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// HLO pass that rewrites host<->device memory transfers (dynamic-slice,
// dynamic-update-slice, copy) into asynchronous start/done instruction pairs.
class HostMemoryTransferAsyncifier : public HloModulePass {
 public:
  // `host_memory_space_color` is the layout memory-space value that marks a
  // buffer as living in host memory (e.g. the S(5) annotation in the tests).
  explicit HostMemoryTransferAsyncifier(int64_t host_memory_space_color)
      : kHostMemorySpaceColor(host_memory_space_color) {}
  ~HostMemoryTransferAsyncifier() override = default;
  absl::string_view name() const override {
    return "host-memory-transfer-asyncifier";
  }
  using HloPassInterface::Run;
  // Returns true iff any instruction in `module` was converted to async form.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  const int64_t kHostMemorySpaceColor;
};
}
#endif
#include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Visitor that converts qualifying dynamic-slice / dynamic-update-slice /
// copy instructions between host and device memory spaces into async
// (async-start / async-done) instruction pairs.
class HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {
 public:
  explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)
      : kHostMemorySpaceColor(host_memory_space_color) {}

  // True iff at least one instruction was rewritten.
  bool Changed() const { return changed_; }

  // All other instruction kinds are left untouched.
  absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
    return absl::OkStatus();
  }

  // Converts a dynamic-slice whose operand lives in host memory and whose
  // result lives in the default (device) memory space.
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {
    HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);
    if (!dynamic_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), " does not have a layout.");
    }
    if (!dynamic_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), "'s operand, ",
                            dynamic_slice_operand->name(),
                            ", does not have a layout.");
    }
    VLOG(3) << absl::StreamFormat(
        "\"%s\" from S(%d) to S(%d)", dynamic_slice->name(),
        dynamic_slice_operand->shape().layout().memory_space(),
        dynamic_slice->shape().layout().memory_space());
    // Only host -> device slices are converted.
    if (dynamic_slice_operand->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    if (dynamic_slice->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }
    VLOG(1) << "DynamicSlice \"" << dynamic_slice->name()
            << "\" is slicing from host memory. Converting to async.";
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);
    // Only the status matters here; the created async-done instruction
    // replaces `dynamic_slice` inside the computation.
    TF_RETURN_IF_ERROR(
        dynamic_slice->parent()
            ->CreateAsyncInstructions(dynamic_slice,
                                      {context_shape, transfer_bytes_shape})
            .status());
    MarkAsChanged();
    return absl::OkStatus();
  }

  // Converts a dynamic-update-slice whose update comes from the default
  // (device) memory space and whose result lives in host memory.
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override {
    HloInstruction* dynamic_update_slice_operand =
        dynamic_update_slice->mutable_operand(0);
    HloInstruction* dynamic_update_slice_update =
        dynamic_update_slice->mutable_operand(1);
    if (!dynamic_update_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(),
                            " does not have a layout.");
    }
    if (!dynamic_update_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s operand, ",
                            dynamic_update_slice_operand->name(),
                            ", does not have a layout.");
    }
    if (!dynamic_update_slice_update->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s update, ",
                            dynamic_update_slice_update->name(),
                            ", does not have a layout.");
    }
    // Only device -> host updates are converted.
    if (dynamic_update_slice_update->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }
    if (dynamic_update_slice->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    // The destination operand must already be in the same memory space as
    // the result; anything else is unexpected.
    if (dynamic_update_slice_operand->shape().layout().memory_space() !=
        dynamic_update_slice->shape().layout().memory_space()) {
      return InternalStrCat(
          "Unexpected that ", dynamic_update_slice_operand->name(),
          "'s memory space is not the same as the dynamic-update-slice.");
    }
    VLOG(1) << "DynamicUpdateSlice \"" << dynamic_update_slice->name()
            << "\" is slicing into host memory space. Converting to async.";
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_RETURN_IF_ERROR(dynamic_update_slice->parent()
                           ->CreateAsyncInstructions(dynamic_update_slice,
                                                     {context_shape})
                           .status());
    MarkAsChanged();
    return absl::OkStatus();
  }

  // Converts a copy between host and default (device) memory, in either
  // direction.
  absl::Status HandleCopy(HloInstruction* copy) override {
    HloInstruction* operand = copy->mutable_operand(0);
    if (!operand->shape().has_layout()) {
      return InternalStrCat(operand->name(), " does not have a layout.");
    }
    if (!copy->shape().has_layout()) {
      return InternalStrCat(copy->name(), " does not have a layout.");
    }
    const auto copy_src_memory_space = operand->shape().layout().memory_space();
    const auto copy_dst_memory_space = copy->shape().layout().memory_space();
    if (!((copy_src_memory_space == kHostMemorySpaceColor &&
           copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||
          (copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&
           copy_dst_memory_space == kHostMemorySpaceColor))) {
      VLOG(2)
          << "Skipping copy because it is not a copy between device memory and "
             "host memory: "
          << copy->ToString();
      return absl::OkStatus();
    }
    VLOG(1)
        << "Copy \"" << copy->name()
        << "\" is between device and host memory space. Converting to async.";
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_RETURN_IF_ERROR(
        copy->parent()->CreateAsyncInstructions(copy, {context_shape}).status());
    MarkAsChanged();
    return absl::OkStatus();
  }

 private:
  const int64_t kHostMemorySpaceColor;
  bool changed_ = false;

  void MarkAsChanged() { changed_ = true; }
};
}
absl::StatusOr<bool> HostMemoryTransferAsyncifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Walk every non-fusion computation with the converting visitor.
  // NOTE(review): `execution_threads` is not used to filter computations
  // here -- confirm that is intentional.
  HostMemoryTransferAsyncifierVisitor visitor(kHostMemorySpaceColor);
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    TF_RETURN_IF_ERROR(computation->Accept(&visitor));
  }
  return visitor.Changed();
}
} | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class HostMemoryTransferAsyncifierTest : public HloTestBase {
 protected:
  // Parses `hlo_string` into a verified module and runs the pass on it.
  absl::StatusOr<bool> RunAsyncifier(absl::string_view hlo_string) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));
    return changed;
  }
  // Runs the pass on an already-built module; the pass requires an
  // unscheduled module.
  absl::StatusOr<bool> RunAsyncifier(HloModule* module) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);
    return asyncifier.Run(module);
  }
 private:
  // Matches the S(5) layout annotations used in the test HLO snippets.
  static constexpr int64_t kHostMemorySpaceColor{5};
};
// Host -> host update: not a host/device transfer, so the pass is a no-op.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Device -> device update: stays synchronous.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Host update into a device destination: not the converted direction
// (the pass only asyncifies device -> host updates), so no change.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Device -> host update is the converted case: the root becomes an
// async-done fed by an async-start whose wrapped computation holds the
// original dynamic-update-slice.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
  constant_0 = s32[] constant(0)
  ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_update_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_update_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_update_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}
// Host -> host slice: not a host/device transfer, pass is a no-op.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Device -> device slice: stays synchronous.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Device source with a host-memory result: not the converted direction
// (the pass only asyncifies host -> device slices), so no change.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Host -> device slice is the converted case: root becomes async-done over
// an async-start wrapping the original dynamic-slice.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  constant_0 = s32[] constant(0)
  ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}
// Host -> host copy: not a host/device transfer, pass is a no-op.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}
// Device -> device copy: stays synchronous.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}
// Disabled variant expecting the generic async-start/async-done form for a
// device -> host copy.
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}
// Active variant: device -> host copy currently becomes the legacy
// copy-start/copy-done pair.
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}
// Disabled variant expecting the generic async-start/async-done form for a
// host -> device copy.
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}
// Active variant: host -> device copy currently becomes the legacy
// copy-start/copy-done pair.
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {
  const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
  host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
  ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}
}
} |
70 | #ifndef TENSORFLOW_CORE_KERNELS_DATA_REWRITE_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_REWRITE_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
// Dataset op that applies a single named Grappler rewrite to its input
// dataset and outputs the rewritten dataset.
class RewriteDatasetOp : public UnaryDatasetOpKernel {
 public:
  static constexpr const char* const kDatasetType = "Rewrite";
  static constexpr const char* const kInputDataset = "input_dataset";
  static constexpr const char* const kRewriteName = "rewrite_name";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  explicit RewriteDatasetOp(OpKernelConstruction* ctx);
 protected:
  void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                   DatasetBase** output) override;
};
}
}
#endif
#include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <map>
#include <string>
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
constexpr const char* const RewriteDatasetOp::kDatasetType;
constexpr const char* const RewriteDatasetOp::kInputDataset;
constexpr const char* const RewriteDatasetOp::kRewriteName;
constexpr const char* const RewriteDatasetOp::kOutputTypes;
constexpr const char* const RewriteDatasetOp::kOutputShapes;
// No op-level attributes to read beyond what the base class handles.
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {}
// Reads the scalar "rewrite_name" input, runs that single Grappler
// optimizer over the input dataset graph, and emits the rewritten dataset.
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                   DatasetBase** output) {
  tstring rewrite_name;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kRewriteName, &rewrite_name));
  // Run exactly the requested optimizer once, and surface optimizer
  // failures rather than silently skipping them.
  auto config_factory = [rewrite_name]() {
    RewriterConfig config;
    config.add_optimizers(std::string(rewrite_name));
    config.set_meta_optimizer_iterations(RewriterConfig::ONE);
    config.set_fail_on_optimizer_errors(true);
    return config;
  };
  core::RefCountPtr<DatasetBase> rewritten_dataset;
  OP_REQUIRES_OK(ctx, RewriteDataset(ctx, input, std::move(config_factory),
                                     false, &rewritten_dataset));
  // Transfer the reference held by the RefCountPtr to the caller.
  *output = rewritten_dataset.release();
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#else
namespace tensorflow {
namespace data {
RewriteDatasetOp::RewriteDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {}
// Mobile build: graph rewriting is unavailable, so pass the input dataset
// through unchanged (taking an extra reference for the new owner).
void RewriteDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                   DatasetBase** output) {
  input->Ref();
  *output = input;
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RewriteDataset").Device(DEVICE_CPU),
RewriteDatasetOp);
}
}
}
#endif | #include "tensorflow/core/kernels/data/rewrite_dataset_op.h"
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "rewrite_dataset";
constexpr char kReplicateOnSplit[] = "replicate_on_split";
// Test parameters for RewriteDataset: wraps an input dataset's params plus
// the name of the rewrite to apply.
class RewriteDatasetParams : public DatasetParams {
 public:
  template <typename T>
  RewriteDatasetParams(T input_dataset_params, string rewrite_name,
                       DataTypeVector output_dtypes,
                       std::vector<PartialTensorShape> output_shapes,
                       string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        rewrite_name_(rewrite_name) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The op's only tensor input is the scalar rewrite name.
  std::vector<Tensor> GetInputTensors() const override {
    return {CreateTensor<tstring>(TensorShape({}), {rewrite_name_})};
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    *input_names = {RewriteDatasetOp::kInputDataset,
                    RewriteDatasetOp::kRewriteName};
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    attr_vector->emplace_back("output_types", output_dtypes_);
    attr_vector->emplace_back("output_shapes", output_shapes_);
    return absl::OkStatus();
  }
  string dataset_type() const override {
    return RewriteDatasetOp::kDatasetType;
  }
 private:
  string rewrite_name_;
};
class RewriteDatasetOpTest : public DatasetOpsTestBase {};
// Applying the "replicate_on_split" rewrite to range(0, 5) must still yield
// the original elements 0..4.
TEST_F(RewriteDatasetOpTest, ReplicateOnSplit) {
  auto range_dataset_params = RangeDatasetParams(0, 5, 1);
  auto rewrite_dataset_params =
      RewriteDatasetParams(std::move(range_dataset_params),
                           kReplicateOnSplit,
                           {DT_INT64},
                           {PartialTensorShape({})},
                           kNodeName);
  std::vector<Tensor> expected_outputs =
      CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}});
  TF_ASSERT_OK(Initialize(rewrite_dataset_params));
  TF_EXPECT_OK(CheckIteratorGetNext(expected_outputs, true));
}
}
}
} |
71 | #ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_SETS_FUNCTIONS_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_SETS_FUNCTIONS_H_
#include "absl/status/status.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
absl::Status RegisterSetsFunctions(FunctionRegistry& registry,
const RuntimeOptions& options);
}
#endif
#include "extensions/sets_functions.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
namespace {
// Returns true iff every element of `sublist` is contained in `list`
// (i.e. `sublist` is a subset of `list` in the set sense).
absl::StatusOr<Value> SetsContains(ValueManager& value_factory,
                                   const ListValue& list,
                                   const ListValue& sublist) {
  // Assume success until an element of `sublist` is not found in `list`.
  bool all_present = true;
  CEL_RETURN_IF_ERROR(sublist.ForEach(
      value_factory,
      [&list, &value_factory,
       &all_present](ValueView needle) -> absl::StatusOr<bool> {
        CEL_ASSIGN_OR_RETURN(auto found, list.Contains(value_factory, needle));
        all_present =
            found->Is<BoolValue>() && found->As<BoolValue>().NativeValue();
        // Returning false stops the iteration as soon as one element is
        // missing.
        return all_present;
      }));
  return value_factory.CreateBoolValue(all_present);
}
// Returns true iff `list` and `sublist` share at least one element.
absl::StatusOr<Value> SetsIntersects(ValueManager& value_factory,
                                     const ListValue& list,
                                     const ListValue& sublist) {
  bool found_common = false;
  CEL_RETURN_IF_ERROR(list.ForEach(
      value_factory,
      [&value_factory, &sublist,
       &found_common](ValueView candidate) -> absl::StatusOr<bool> {
        CEL_ASSIGN_OR_RETURN(auto membership,
                             sublist.Contains(value_factory, candidate));
        found_common = membership->Is<BoolValue>() &&
                       membership->As<BoolValue>().NativeValue();
        // Keep scanning only while no shared element has been seen.
        return !found_common;
      }));
  return value_factory.CreateBoolValue(found_common);
}
// Two lists are set-equivalent iff each contains all elements of the other.
absl::StatusOr<Value> SetsEquivalent(ValueManager& value_factory,
                                     const ListValue& list,
                                     const ListValue& sublist) {
  CEL_ASSIGN_OR_RETURN(auto forward,
                       SetsContains(value_factory, list, sublist));
  const bool forward_definitely_false =
      forward->Is<BoolValue>() && !forward->As<BoolValue>().NativeValue();
  if (forward_definitely_false) {
    // Short-circuit: no need to test the reverse direction.
    return forward;
  }
  return SetsContains(value_factory, sublist, list);
}
// Registers the binary "sets.contains" overload (non-receiver style).
absl::Status RegisterSetsContainsFunction(FunctionRegistry& registry) {
  return registry.Register(
      BinaryFunctionAdapter<
          absl::StatusOr<Value>, const ListValue&,
          const ListValue&>::CreateDescriptor("sets.contains",
                                              false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
                            const ListValue&>::WrapFunction(SetsContains));
}
// Registers the binary "sets.intersects" overload.
absl::Status RegisterSetsIntersectsFunction(FunctionRegistry& registry) {
  return registry.Register(
      BinaryFunctionAdapter<
          absl::StatusOr<Value>, const ListValue&,
          const ListValue&>::CreateDescriptor("sets.intersects",
                                              false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
                            const ListValue&>::WrapFunction(SetsIntersects));
}
// Registers the binary "sets.equivalent" overload.
absl::Status RegisterSetsEquivalentFunction(FunctionRegistry& registry) {
  return registry.Register(
      BinaryFunctionAdapter<
          absl::StatusOr<Value>, const ListValue&,
          const ListValue&>::CreateDescriptor("sets.equivalent",
                                              false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
                            const ListValue&>::WrapFunction(SetsEquivalent));
}
}
// Registers all sets.* extension functions.
// NOTE(review): `options` is currently unused; it is kept for interface
// symmetry with other extension registrars -- confirm.
absl::Status RegisterSetsFunctions(FunctionRegistry& registry,
                                   const RuntimeOptions& options) {
  CEL_RETURN_IF_ERROR(RegisterSetsContainsFunction(registry));
  CEL_RETURN_IF_ERROR(RegisterSetsIntersectsFunction(registry));
  CEL_RETURN_IF_ERROR(RegisterSetsEquivalentFunction(registry));
  return absl::OkStatus();
}
} | #include "extensions/sets_functions.h"
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function_adapter.h"
#include "eval/public/cel_options.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::runtime::Activation;
using ::google::api::expr::runtime::CelExpressionBuilder;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::CreateCelExpressionBuilder;
using ::google::api::expr::runtime::FunctionAdapter;
using ::google::api::expr::runtime::InterpreterOptions;
using ::google::protobuf::Arena;
using cel::internal::IsOk;
// A single end-to-end test case: an expression that must evaluate to true.
struct TestInfo {
  std::string expr;
};
class CelSetsFunctionsTest : public testing::TestWithParam<TestInfo> {};
// Parses, plans, and evaluates each expression with the sets.* functions
// registered, asserting the result is the boolean `true`.
TEST_P(CelSetsFunctionsTest, EndToEnd) {
  const TestInfo& test_info = GetParam();
  std::vector<Macro> all_macros = Macro::AllMacros();
  auto result = ParseWithMacros(test_info.expr, all_macros, "<input>");
  EXPECT_THAT(result, IsOk());
  ParsedExpr parsed_expr = *result;
  Expr expr = parsed_expr.expr();
  SourceInfo source_info = parsed_expr.source_info();
  // Options chosen to match the semantics the sets functions rely on
  // (notably heterogeneous equality across numeric types).
  InterpreterOptions options;
  options.enable_heterogeneous_equality = true;
  options.enable_empty_wrapper_null_unboxing = true;
  options.enable_qualified_identifier_rewrites = true;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  ASSERT_OK(RegisterSetsFunctions(builder->GetRegistry()->InternalGetRegistry(),
                                  cel::RuntimeOptions{}));
  ASSERT_OK(google::api::expr::runtime::RegisterBuiltinFunctions(
      builder->GetRegistry(), options));
  ASSERT_OK_AND_ASSIGN(auto cel_expr,
                       builder->CreateExpression(&expr, &source_info));
  Arena arena;
  Activation activation;
  ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
  ASSERT_TRUE(out.IsBool()) << test_info.expr << " -> " << out.DebugString();
  EXPECT_TRUE(out.BoolOrDie()) << test_info.expr << " -> " << out.DebugString();
}
INSTANTIATE_TEST_SUITE_P(
CelSetsFunctionsTest, CelSetsFunctionsTest,
testing::ValuesIn<TestInfo>({
{"sets.contains([], [])"},
{"sets.contains([1], [])"},
{"sets.contains([1], [1])"},
{"sets.contains([1], [1, 1])"},
{"sets.contains([1, 1], [1])"},
{"sets.contains([2, 1], [1])"},
{"sets.contains([1], [1.0, 1u])"},
{"sets.contains([1, 2], [2u, 2.0])"},
{"sets.contains([1, 2u], [2, 2.0])"},
{"!sets.contains([1], [2])"},
{"!sets.contains([1], [1, 2])"},
{"!sets.contains([1], [\"1\", 1])"},
{"!sets.contains([1], [1.1, 2])"},
{"sets.intersects([1], [1])"},
{"sets.intersects([1], [1, 1])"},
{"sets.intersects([1, 1], [1])"},
{"sets.intersects([2, 1], [1])"},
{"sets.intersects([1], [1, 2])"},
{"sets.intersects([1], [1.0, 2])"},
{"sets.intersects([1, 2], [2u, 2, 2.0])"},
{"sets.intersects([1, 2], [1u, 2, 2.3])"},
{"!sets.intersects([], [])"},
{"!sets.intersects([1], [])"},
{"!sets.intersects([1], [2])"},
{"!sets.intersects([1], [\"1\", 2])"},
{"!sets.intersects([1], [1.1, 2u])"},
{"sets.equivalent([], [])"},
{"sets.equivalent([1], [1])"},
{"sets.equivalent([1], [1, 1])"},
{"sets.equivalent([1, 1, 2], [2, 2, 1])"},
{"sets.equivalent([1, 1], [1])"},
{"sets.equivalent([1], [1u, 1.0])"},
{"sets.equivalent([1], [1u, 1.0])"},
{"sets.equivalent([1, 2, 3], [3u, 2.0, 1])"},
{"!sets.equivalent([2, 1], [1])"},
{"!sets.equivalent([1], [1, 2])"},
{"!sets.equivalent([1, 2], [2u, 2, 2.0])"},
{"!sets.equivalent([1, 2], [1u, 2, 2.3])"},
{"sets.equivalent([false, true], [true, false])"},
{"!sets.equivalent([true], [false])"},
{"sets.equivalent(['foo', 'bar'], ['bar', 'foo'])"},
{"!sets.equivalent(['foo'], ['bar'])"},
{"sets.equivalent([b'foo', b'bar'], [b'bar', b'foo'])"},
{"!sets.equivalent([b'foo'], [b'bar'])"},
{"sets.equivalent([null], [null])"},
{"!sets.equivalent([null], [])"},
{"sets.equivalent([type(1), type(1u)], [type(1u), type(1)])"},
{"!sets.equivalent([type(1)], [type(1u)])"},
{"sets.equivalent([duration('0s'), duration('1s')], [duration('1s'), "
"duration('0s')])"},
{"!sets.equivalent([duration('0s')], [duration('1s')])"},
{"sets.equivalent([timestamp('1970-01-01T00:00:00Z'), "
"timestamp('1970-01-01T00:00:01Z')], "
"[timestamp('1970-01-01T00:00:01Z'), "
"timestamp('1970-01-01T00:00:00Z')])"},
{"!sets.equivalent([timestamp('1970-01-01T00:00:00Z')], "
"[timestamp('1970-01-01T00:00:01Z')])"},
{"sets.equivalent([[false, true]], [[false, true]])"},
{"!sets.equivalent([[false, true]], [[true, false]])"},
{"sets.equivalent([{'foo': true, 'bar': false}], [{'bar': false, "
"'foo': true}])"},
}));
}
} |
72 | #ifndef TENSORSTORE_KVSTORE_FILE_UTIL_H_
#define TENSORSTORE_KVSTORE_FILE_UTIL_H_
#include <string_view>
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
bool IsKeyValid(std::string_view key, std::string_view lock_suffix);
std::string_view LongestDirectoryPrefix(const KeyRange& range);
}
}
#endif
#include "tensorstore/kvstore/file/util.h"
#include <stddef.h>
#include <string_view>
#include "absl/strings/match.h"
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
/// Returns true if `key` is usable as a file-backed kvstore key:
/// non-empty, contains no NUL byte, does not end in '/', and no '/'
/// separated component is ".", "..", or (when `lock_suffix` is
/// non-empty) ends with `lock_suffix`.
/// Empty components (e.g. from a leading '/' or "//") are permitted.
bool IsKeyValid(std::string_view key, std::string_view lock_suffix) {
  if (key.empty() || key.back() == '/') return false;
  if (key.find('\0') != std::string_view::npos) return false;
  for (;;) {
    const size_t slash = key.find('/');
    const std::string_view component =
        (slash == std::string_view::npos) ? key : key.substr(0, slash);
    if (component == "." || component == "..") return false;
    // Reserved lock-file suffix check (equivalent to absl::EndsWith).
    if (!lock_suffix.empty() && component.size() >= lock_suffix.size() &&
        component.substr(component.size() - lock_suffix.size()) ==
            lock_suffix) {
      return false;
    }
    if (slash == std::string_view::npos) return true;
    key.remove_prefix(slash + 1);
  }
}
// Returns the longest directory path (without trailing '/') that is a
// prefix of every key in `range`, or an empty view if the keys share no
// complete directory component.
std::string_view LongestDirectoryPrefix(const KeyRange& range) {
  // Longest common *string* prefix of the range bounds.
  std::string_view prefix = tensorstore::LongestPrefix(range);
  // Trim back to the final '/' so only whole components are returned.
  const size_t i = prefix.rfind('/');
  if (i == std::string_view::npos) return {};
  return prefix.substr(0, i);
}
}
} | #include "tensorstore/kvstore/file/util.h"
#include <gtest/gtest.h>
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
// Exercises IsKeyValid over empty keys, absolute/relative paths, dot
// components, lock-file suffixes, and embedded NUL bytes.
TEST(IsKeyValid, Basic) {
  EXPECT_TRUE(IsKeyValid("tmp/root", ""));
  EXPECT_TRUE(IsKeyValid("a", ""));
  EXPECT_TRUE(IsKeyValid("a/b", ""));
  EXPECT_FALSE(IsKeyValid("", ""));
  EXPECT_FALSE(IsKeyValid("/", ""));
  EXPECT_TRUE(IsKeyValid("/tmp/root", ""));
  EXPECT_FALSE(IsKeyValid("/tmp/root/", ""));
  // Reconstructed from a corrupted (unterminated) source line: empty
  // components such as "//" are accepted by IsKeyValid.
  EXPECT_TRUE(IsKeyValid("tmp//root", ""));
  EXPECT_FALSE(IsKeyValid("tmp/./root", ""));
  EXPECT_FALSE(IsKeyValid("tmp/../root", ""));
  EXPECT_FALSE(IsKeyValid("tmp/root/", ""));
  EXPECT_FALSE(IsKeyValid("tmp/.lock/a", ".lock"));
  EXPECT_FALSE(IsKeyValid("tmp/foo.lock/a", ".lock"));
  // std::string_view with explicit length so the embedded NUL survives.
  EXPECT_FALSE(IsKeyValid(std::string_view("tmp/\0bar", 8), ""));
}
// The shared prefix must be truncated at the last '/' so only whole
// directory components are returned ("/a" — not "/a/" or "/a/a").
TEST(LongestDirectoryPrefix, Basic) {
  EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"a", "b"}));
  EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"/a", "/b"}));
  EXPECT_EQ("/a", LongestDirectoryPrefix(KeyRange{"/a/a", "/a/b"}));
}
} |
73 | #ifndef TENSORSTORE_UTIL_UNIT_H_
#define TENSORSTORE_UTIL_UNIT_H_
#include <iosfwd>
#include <string>
#include <string_view>
#include <utility>
namespace tensorstore {
// A physical quantity expressed as `multiplier * base_unit`, e.g.
// Unit(5.5, "nm"). The base unit is an uninterpreted string.
struct Unit {
  // Default: dimensionless 1.
  Unit() = default;
  // Parses strings like "4nm", "-4e-3 nm", "nm" (defined in unit.cc).
  Unit(std::string_view unit);
  // Convenience converting constructors; all funnel into the
  // string_view parser above.
  Unit(const char* unit) : Unit(std::string_view(unit)) {}
  Unit(const std::string& unit) : Unit(std::string_view(unit)) {}
  // Direct construction from components.
  Unit(double multiplier, std::string base_unit)
      : multiplier(multiplier), base_unit(std::move(base_unit)) {}
  double multiplier = 1;
  std::string base_unit;
  // Prints "<multiplier> <base_unit>", eliding a multiplier of 1 when a
  // base unit is present (see unit.cc).
  friend std::ostream& operator<<(std::ostream& os, const Unit& unit);
  std::string to_string() const;
  // absl::StrCat/StrFormat support.
  template <typename Sink>
  friend void AbslStringify(Sink& sink, const Unit& self) {
    sink.Append(self.to_string());
  }
  // Exact comparison of both multiplier and base unit.
  friend bool operator==(const Unit& a, const Unit& b);
  friend bool operator!=(const Unit& a, const Unit& b) { return !(a == b); }
  // Scaling only affects the multiplier; the base unit is unchanged.
  friend Unit operator*(Unit u, double x) {
    u.multiplier *= x;
    return u;
  }
  friend Unit operator*(double x, Unit u) {
    u.multiplier *= x;
    return u;
  }
  friend Unit& operator*=(Unit& u, double x) {
    u.multiplier *= x;
    return u;
  }
  friend Unit operator/(Unit u, double x) {
    u.multiplier /= x;
    return u;
  }
  friend Unit& operator/=(Unit& u, double x) {
    u.multiplier /= x;
    return u;
  }
  // Reflection hook used by tensorstore serialization/JSON binding.
  static constexpr auto ApplyMembers = [](auto&& x, auto f) {
    return f(x.multiplier, x.base_unit);
  };
};
}
#endif
#include "tensorstore/util/unit.h"
#include <ostream>
#include <string>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "re2/re2.h"
namespace tensorstore {
// Streams `unit` as "<multiplier> <base_unit>", omitting the multiplier
// when it is exactly 1 and a base unit exists, and omitting the base
// unit when it is empty.
std::ostream& operator<<(std::ostream& os, const Unit& unit) {
  if (unit.base_unit.empty()) return os << unit.multiplier;
  if (unit.multiplier != 1) os << unit.multiplier << ' ';
  return os << unit.base_unit;
}
// Units are equal iff both components match exactly.
bool operator==(const Unit& a, const Unit& b) {
  return a.base_unit == b.base_unit && a.multiplier == b.multiplier;
}
// Parses "[number][whitespace][base unit]" after trimming surrounding
// ASCII whitespace. A leading number (optionally signed, decimal or
// scientific) becomes `multiplier`; everything after it becomes
// `base_unit`. With no number, `multiplier` keeps its default of 1.
Unit::Unit(std::string_view unit) {
  // One capture group for the number, then swallow trailing whitespace.
  static LazyRE2 kNumberPattern = {
      "([-+]?(?:\\.[0-9]+|[0-9]+(?:\\.[0-9]*)?)(?:[eE][-+]?\\d+)?)\\s*"};
  while (!unit.empty() && absl::ascii_isspace(unit.front())) {
    unit.remove_prefix(1);
  }
  while (!unit.empty() && absl::ascii_isspace(unit.back())) {
    unit.remove_suffix(1);
  }
  // Consume advances `unit` past the matched prefix; on no match it
  // leaves both `unit` and `multiplier` untouched.
  RE2::Consume(&unit, *kNumberPattern, &multiplier);
  base_unit = unit;
}
// String form matching operator<<: the multiplier is printed alone when
// there is no base unit, and omitted when it is exactly 1.
std::string Unit::to_string() const {
  if (base_unit.empty()) return absl::StrCat(multiplier);
  return multiplier == 1 ? base_unit
                         : absl::StrCat(multiplier, " ", base_unit);
}
} | #include "tensorstore/util/unit.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/unit.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::TestJsonBinderRoundTrip;
using ::tensorstore::TestJsonBinderRoundTripJsonOnlyInexact;
using ::tensorstore::Unit;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(UnitTest, DefaultConstruct) {
Unit u;
EXPECT_EQ(1, u.multiplier);
EXPECT_EQ("", u.base_unit);
}
TEST(UnitTest, Compare) {
Unit a(5, "nm");
Unit b(5.5, "nm");
Unit c(5, "um");
Unit d;
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, c);
EXPECT_NE(b, d);
EXPECT_NE(c, d);
}
TEST(UnitTest, Ostream) {
EXPECT_EQ("5.5 nm", tensorstore::StrCat(Unit(5.5, "nm")));
EXPECT_EQ("nm", tensorstore::StrCat(Unit(1, "nm")));
EXPECT_EQ("5", tensorstore::StrCat(Unit(5, "")));
EXPECT_EQ("1", tensorstore::StrCat(Unit(1, "")));
}
TEST(UnitTest, ConvertToString) {
EXPECT_EQ("5.5 nm", Unit(5.5, "nm").to_string());
EXPECT_EQ("nm", Unit(1, "nm").to_string());
EXPECT_EQ("5", Unit(5, "").to_string());
EXPECT_EQ("1", Unit(1, "").to_string());
EXPECT_EQ("1", absl::StrCat(Unit(1, "")));
}
TEST(UnitTest, MultiplierBaseUnit) {
Unit u = {5, "nm"};
EXPECT_EQ(5, u.multiplier);
EXPECT_EQ("nm", u.base_unit);
}
TEST(UnitTest, Unit) {
EXPECT_EQ(Unit(4, "nm"), Unit("4nm"));
EXPECT_EQ(Unit(4, "nm"), Unit("4.nm"));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("4e-3nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm "));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("+4e-3nm"));
EXPECT_EQ(Unit(-4e-3, "nm"), Unit("-4e-3nm"));
EXPECT_EQ(Unit(4.5, "nm"), Unit("4.5nm"));
EXPECT_EQ(Unit(1, "nm"), Unit("nm"));
EXPECT_EQ(Unit(4, ""), Unit("4"));
EXPECT_EQ(Unit(1, ""), Unit(""));
EXPECT_EQ(Unit(3, "nm @ 50"), Unit("3 nm @ 50"));
}
TEST(UnitTest, JsonRoundTrip) {
TestJsonBinderRoundTrip<Unit>({
{Unit(4, "nm"), {4, "nm"}},
{Unit(4.5, "nm"), {4.5, "nm"}},
{Unit(4.5, ""), {4.5, ""}},
});
}
TEST(UnitTest, JsonRoundTripInexact) {
TestJsonBinderRoundTripJsonOnlyInexact<Unit>({
{"4nm", {4, "nm"}},
{4, {4, ""}},
{"nm", {1, "nm"}},
});
}
TEST(SerializationTest, Basic) {
TestSerializationRoundTrip(Unit("4nm"));
TestSerializationRoundTrip(Unit("4"));
TestSerializationRoundTrip(Unit("nm"));
TestSerializationRoundTrip(Unit(""));
}
} |
74 | #ifndef TENSORFLOW_TSL_PLATFORM_RETRYING_UTILS_H_
#define TENSORFLOW_TSL_PLATFORM_RETRYING_UTILS_H_
#include <functional>
#include "absl/time/time.h"
#include "tsl/platform/status.h"
namespace tsl {
// Configuration for RetryingUtils: exponential backoff between attempts,
// starting at `init_delay_time_us` and capped at `max_delay_time_us`,
// for at most `max_retries` retries. All delays are in microseconds.
// Intentionally non-explicit so existing `RetryConfig(0)`-style callers
// keep working.
struct RetryConfig {
  RetryConfig(int64_t init_delay_time_us = 100 * 1000,
              int64_t max_delay_time_us = 32 * 1000 * 1000,
              int max_retries = 10)
      // Member-init list (declaration order) instead of assignment in
      // the body.
      : max_retries(max_retries),
        init_delay_time_us(init_delay_time_us),
        max_delay_time_us(max_delay_time_us) {}
  // Maximum number of retries after the initial attempt.
  int max_retries;
  // Initial backoff delay; doubled after each retry. 0 disables sleeping.
  int64_t init_delay_time_us;
  // Upper bound for the exponential portion of the delay.
  int64_t max_delay_time_us;
};
// Static helpers that repeatedly invoke a callable until it succeeds,
// fails with a non-retriable status, or exhausts config.max_retries
// (in which case kAborted is returned).
class RetryingUtils {
 public:
  // Retries `f`, sleeping with Env::Default() between attempts.
  static absl::Status CallWithRetries(const std::function<absl::Status()>& f,
                                      const RetryConfig& config);
  // As above, but delays via `sleep_usec` (microseconds) — useful for
  // injecting a fake sleep in tests.
  static absl::Status CallWithRetries(
      const std::function<absl::Status()>& f,
      const std::function<void(int64_t)>& sleep_usec,
      const RetryConfig& config);
  // Retries `delete_func`; a NOT_FOUND on a retry (not the first call)
  // is treated as success, since an earlier attempt may have deleted
  // the object before its response was lost.
  static absl::Status DeleteWithRetries(
      const std::function<absl::Status()>& delete_func,
      const RetryConfig& config);
};
absl::Duration ComputeRetryBackoff(
int current_retry_attempt, absl::Duration min_delay = absl::Milliseconds(1),
absl::Duration max_delay = absl::Seconds(10));
}
#endif
#include "tsl/platform/retrying_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
namespace tsl {
namespace {
// Only transient failures are worth retrying: UNAVAILABLE,
// DEADLINE_EXCEEDED, and UNKNOWN. Everything else is returned as-is.
bool IsRetriable(absl::StatusCode code) {
  return code == absl::StatusCode::kUnavailable ||
         code == absl::StatusCode::kDeadlineExceeded ||
         code == absl::StatusCode::kUnknown;
}
// Returns a uniform random double in [0, 1], by scaling a random
// 64-bit integer into the unit interval.
double GenerateUniformRandomNumber() {
  return random::New64() * (1.0 / std::numeric_limits<uint64_t>::max());
}
// Returns a uniform random double in [a, b]. Requires a <= b; the a == b
// fast path avoids the DCHECK and any arithmetic.
double GenerateUniformRandomNumberBetween(double a, double b) {
  if (a == b) return a;
  DCHECK_LT(a, b);
  return a + GenerateUniformRandomNumber() * (b - a);
}
}
// Convenience overload: retries `f` using the default Env's
// SleepForMicroseconds as the inter-attempt delay function.
absl::Status RetryingUtils::CallWithRetries(
    const std::function<absl::Status()>& f, const RetryConfig& config) {
  return CallWithRetries(
      f,
      [](int64_t micros) {
        return Env::Default()->SleepForMicroseconds(micros);
      },
      config);
}
// Core retry loop: calls `f` until it returns a non-retriable status or
// `config.max_retries` retriable failures have occurred (then kAborted).
// Between attempts sleeps for init_delay * 2^retries (capped at
// max_delay) plus up to 1s of random jitter; a non-positive
// init_delay_time_us disables sleeping entirely.
absl::Status RetryingUtils::CallWithRetries(
    const std::function<absl::Status()>& f,
    const std::function<void(int64_t)>& sleep_usec, const RetryConfig& config) {
  int retries = 0;
  while (true) {
    auto status = f();
    if (!IsRetriable(status.code())) {
      return status;
    }
    if (retries >= config.max_retries) {
      return absl::Status(
          absl::StatusCode::kAborted,
          strings::StrCat(
              "All ", config.max_retries,
              " retry attempts failed. The last failure: ", status.message()));
    }
    int64_t delay_micros = 0;
    if (config.init_delay_time_us > 0) {
      // Up to one second of jitter to de-synchronize concurrent clients.
      const int64_t random_micros = random::New64() % 1000000;
      // Guard the exponential shift: `init_delay_time_us << retries` is
      // undefined/overflows once the product exceeds int64 range (the
      // retry count is caller-configurable). When it would overflow,
      // fall back to the configured maximum directly.
      int64_t backoff = config.max_delay_time_us;
      if (retries < 62 &&
          config.init_delay_time_us <= ((int64_t{1} << 62) >> retries)) {
        backoff = std::min(config.init_delay_time_us << retries,
                           config.max_delay_time_us);
      }
      delay_micros = backoff + random_micros;
    }
    VLOG(1) << "The operation failed and will be automatically retried in "
            << (delay_micros / 1000000.0) << " seconds (attempt "
            << (retries + 1) << " out of " << config.max_retries
            << "), caused by: " << status.ToString();
    sleep_usec(delay_micros);
    retries++;
  }
}
// Retries `delete_func` with the standard retry policy. A NOT_FOUND
// returned on any attempt *after the first* is converted to OK: the
// earlier attempt may actually have deleted the object even though its
// response was lost, so the object being gone is the desired outcome.
absl::Status RetryingUtils::DeleteWithRetries(
    const std::function<absl::Status()>& delete_func,
    const RetryConfig& config) {
  // Captured by reference so the flag persists across retry attempts.
  bool is_retried = false;
  return RetryingUtils::CallWithRetries(
      [delete_func, &is_retried]() {
        const absl::Status status = delete_func();
        if (is_retried && status.code() == error::NOT_FOUND) {
          return absl::OkStatus();
        }
        is_retried = true;
        return status;
      },
      config);
}
// Returns a jittered exponential backoff for the given 0-based retry
// attempt: roughly min_delay * 1.3^attempt, randomized downward by up
// to 40%, clamped to [min_delay, ~max_delay].
absl::Duration ComputeRetryBackoff(int current_retry_attempt,
                                   absl::Duration min_delay,
                                   absl::Duration max_delay) {
  DCHECK_GE(current_retry_attempt, 0);
  // Growth factor per attempt and the fraction of the delay that jitter
  // may remove.
  constexpr double kBackoffBase = 1.3;
  constexpr double kBackoffRandMult = 0.4;
  // Deterministic floor so the jitter below can never push the total
  // under a fixed fraction of min_delay.
  const absl::Duration first_term = min_delay * kBackoffRandMult;
  absl::Duration uncapped_second_term =
      min_delay * std::pow(kBackoffBase, current_retry_attempt);
  // Cap so first_term + second_term stays within max_delay.
  absl::Duration second_term =
      std::min(uncapped_second_term, max_delay - first_term);
  // Randomize the exponential part within [1 - 0.4, 1.0] of its value.
  second_term *=
      GenerateUniformRandomNumberBetween(1.0 - kBackoffRandMult, 1.0);
  return std::max(first_term + second_term, min_delay);
}
} | #include "tsl/platform/retrying_utils.h"
#include <cmath>
#include <fstream>
#include "absl/time/time.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(RetryingUtilsTest, CallWithRetries_RetryDelays) {
std::vector<double> requested_delays;
std::function<void(int64_t)> sleep = [&requested_delays](int64_t delay) {
requested_delays.emplace_back(delay / 1000000.0);
};
std::function<absl::Status()> f = []() {
return errors::Unavailable("Failed.");
};
const auto& status = RetryingUtils::CallWithRetries(
f, sleep, RetryConfig(500000 ));
EXPECT_TRUE(errors::IsAborted(status));
EXPECT_TRUE(absl::StrContains(
status.message(),
"All 10 retry attempts failed. The last failure: Failed."))
<< status;
EXPECT_EQ(10, requested_delays.size());
EXPECT_NEAR(0.5, requested_delays[0], 1.0);
EXPECT_NEAR(1.0, requested_delays[1], 1.0);
EXPECT_NEAR(2.0, requested_delays[2], 1.0);
EXPECT_NEAR(4.0, requested_delays[3], 1.0);
EXPECT_NEAR(8.0, requested_delays[4], 1.0);
EXPECT_NEAR(16.0, requested_delays[5], 1.0);
EXPECT_NEAR(32.0, requested_delays[6], 1.0);
EXPECT_NEAR(32.0, requested_delays[7], 1.0);
EXPECT_NEAR(32.0, requested_delays[8], 1.0);
EXPECT_NEAR(32.0, requested_delays[9], 1.0);
}
TEST(RetryingUtilsTest, CallWithRetries_NotFoundIsNotRetried) {
std::vector<absl::Status> results(
{errors::Unavailable("Failed."), errors::NotFound("Not found.")});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
EXPECT_TRUE(errors::IsNotFound(RetryingUtils::CallWithRetries(
f, RetryConfig(0 ))));
}
TEST(RetryingUtilsTest, CallWithRetries_ImmediateSuccess) {
std::vector<absl::Status> results({absl::OkStatus()});
std::function<void(int64_t)> sleep = [](int64_t delay) {
ADD_FAILURE() << "Unexpected call to sleep.";
};
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
f, sleep, RetryConfig(1L )));
}
TEST(RetryingUtilsTest, CallWithRetries_EventualSuccess) {
std::vector<absl::Status> results({errors::Unavailable("Failed."),
errors::Unavailable("Failed again."),
absl::OkStatus()});
std::function<absl::Status()> f = [&results]() {
auto result = results[0];
results.erase(results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::CallWithRetries(
f, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_ImmediateSuccess) {
std::vector<absl::Status> delete_results({absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_EventualSuccess) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), absl::OkStatus()});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_PermissionDeniedNotRetried) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::PermissionDenied("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_TRUE(errors::IsPermissionDenied(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 ))));
}
TEST(RetryingUtilsTest, DeleteWithRetries_SuccessThroughFileNotFound) {
std::vector<absl::Status> delete_results(
{errors::Unavailable(""), errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
TF_EXPECT_OK(RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 )));
}
TEST(RetryingUtilsTest, DeleteWithRetries_FirstNotFoundReturnedAsIs) {
std::vector<absl::Status> delete_results({errors::NotFound("")});
const auto delete_func = [&delete_results]() {
auto result = delete_results[0];
delete_results.erase(delete_results.begin());
return result;
};
EXPECT_EQ(error::NOT_FOUND,
RetryingUtils::DeleteWithRetries(
delete_func, RetryConfig(0 ))
.code());
}
TEST(RetryingUtilsTest, ComputeRetryBackoff) {
for (int i = 0; i < 30; ++i) {
EXPECT_LE(0.4 * absl::Milliseconds(1) +
0.6 * absl::Milliseconds(1) * std::pow(1.3, i),
ComputeRetryBackoff(i));
EXPECT_LE(
ComputeRetryBackoff(i),
0.4 * absl::Milliseconds(1) + absl::Milliseconds(1) * std::pow(1.3, i));
}
}
TEST(RetryingUtilsTest, ComputeRetryBackoff_MinMaxDelays) {
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(ComputeRetryBackoff(i,
absl::Seconds(10)),
absl::Seconds(10));
EXPECT_EQ(ComputeRetryBackoff(i,
absl::Microseconds(1),
absl::Microseconds(1)),
absl::Microseconds(1));
}
}
}
} |
75 | #ifndef XLA_SERVICE_HLO_PHI_GRAPH_H_
#define XLA_SERVICE_HLO_PHI_GRAPH_H_
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_value.h"
namespace xla {
// A graph of phi values (from HLO dataflow analysis) supporting the
// simplification of trivial/degenerate phis: a phi whose inputs all
// resolve to one value is replaced by that value.
class PhiGraph {
 public:
  // Registers `value` (which must be a phi) with the given inputs,
  // creating nodes for any inputs not yet seen.
  void RegisterPhi(const HloValue& value,
                   absl::Span<const HloValue* const> inputs);
  // Returns the id `value` resolves to after Optimize().
  HloValue::Id GetOptimizedId(const HloValue& value);
  // True iff `value`'s registered input-id set equals `inputs`' id set.
  bool InputsEqualTo(const HloValue& value,
                     absl::Span<const HloValue* const> inputs);
  // Returns the id that `id` resolves to after Optimize().
  HloValue::Id FindOptimizedValue(const HloValue::Id id);
  // Simplifies the graph to a fixed point.
  void Optimize();
  std::string ToString();

 private:
  struct Node {
    // Must default to false: CreateOrReuseNode never sets this flag for
    // non-phi inputs, and Optimize() reads it — leaving it
    // uninitialized was undefined behavior.
    bool is_phi = false;
    // Nodes that have this node as an operand (populated in Optimize()).
    std::vector<Node*> users;
    // The phi's inputs (empty for non-phi nodes).
    std::vector<Node*> operands;
    // Representative HloValue id for this node.
    HloValue::Id value_id;
    // Set when this node has been merged into another node.
    bool mark_as_dead = false;
  };
  // Returns the node for `value`, creating it on first use.
  Node* CreateOrReuseNode(const HloValue& value);
  // Redirects all edges and value-id mappings from `node` to `replace`
  // and marks `node` dead.
  void ReplaceNodeWith(Node* node, Node* replace);
  // All value ids currently mapped onto each live node.
  absl::flat_hash_map<Node*, std::vector<HloValue::Id>> node_to_value_id_;
  absl::flat_hash_map<HloValue::Id, Node*> value_id_to_node_;
  // Owns every node ever created (dead nodes are kept but skipped).
  std::vector<std::unique_ptr<Node>> node_storage_;
};
}
#endif
#include "xla/service/hlo_phi_graph.h"
#include <queue>
namespace xla {
// Returns the representative id that `value` resolves to. The value's
// node must still be live (not merged away as dead).
HloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) {
  Node* const resolved = value_id_to_node_[value.id()];
  CHECK(!resolved->mark_as_dead);
  return resolved->value_id;
}
bool PhiGraph::InputsEqualTo(const HloValue& value,
absl::Span<const HloValue* const> inputs) {
auto iter = value_id_to_node_.find(value.id());
CHECK(iter != value_id_to_node_.end());
absl::flat_hash_set<HloValue::Id> existing_set;
for (Node* operand : iter->second->operands) {
existing_set.insert(operand->value_id);
}
absl::flat_hash_set<HloValue::Id> new_set;
for (const HloValue* input : inputs) {
new_set.insert(input->id());
}
return existing_set == new_set;
}
// Returns the representative id that `id` resolves to. `id` must be
// known to the graph and its node must still be live.
HloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) {
  auto it = value_id_to_node_.find(id);
  CHECK(it != value_id_to_node_.end());
  Node* const node = it->second;
  CHECK(!node->mark_as_dead);
  return node->value_id;
}
// Returns the node for `value`, allocating and indexing a fresh one the
// first time a value id is seen; subsequent lookups reuse it.
PhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) {
  auto iter = value_id_to_node_.find(value.id());
  if (iter == value_id_to_node_.end()) {
    // node_storage_ owns the node; maps hold raw pointers into it.
    node_storage_.emplace_back(std::make_unique<Node>());
    Node* node = node_storage_.back().get();
    node->value_id = value.id();
    value_id_to_node_[value.id()] = node;
    node_to_value_id_[node].push_back(value.id());
    return node;
  } else {
    // Sanity: an existing node must still carry the id it was keyed by.
    CHECK_NE(iter->second, nullptr);
    CHECK_EQ(iter->second->value_id, value.id());
    return iter->second;
  }
}
// Merges phi `node` into `replace`: rewires every user/operand edge,
// repoints all value ids mapped to `node`, and marks `node` dead.
// No-op if `node` is already dead; if `replace` is dead, the merge is
// redirected to the live node its value id currently maps to.
void PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) {
  CHECK(node->is_phi);
  if (node->mark_as_dead) {
    return;
  }
  if (replace->mark_as_dead) {
    // Follow the id mapping to the live replacement and recurse.
    auto iter = value_id_to_node_.find(replace->value_id);
    CHECK(iter != value_id_to_node_.end());
    return ReplaceNodeWith(node, iter->second);
  }
  CHECK(!replace->mark_as_dead);
  // Redirect incoming edges (users' operand lists) ...
  for (Node* user : node->users) {
    absl::c_replace(user->operands, node, replace);
  }
  // ... and outgoing edges (operands' user lists).
  for (Node* operand : node->operands) {
    absl::c_replace(operand->users, node, replace);
  }
  // Every value id that resolved to `node` now resolves to `replace`.
  for (HloValue::Id value_id : node_to_value_id_[node]) {
    CHECK(value_id_to_node_.contains(value_id));
    value_id_to_node_[value_id] = replace;
  }
  absl::c_copy(node_to_value_id_[node],
               std::back_inserter(node_to_value_id_[replace]));
  node_to_value_id_[node].clear();
  node->mark_as_dead = true;
}
// Registers `value` as a phi over `inputs`. Re-registration replaces
// any previously recorded operand list (operands are cleared first).
void PhiGraph::RegisterPhi(const HloValue& value,
                           absl::Span<const HloValue* const> inputs) {
  Node* node = CreateOrReuseNode(value);
  CHECK(value.is_phi());
  node->is_phi = true;
  node->operands.clear();
  for (auto input : inputs) {
    CHECK(input != nullptr);
    // Inputs get (non-phi) nodes on demand; they become phis only if
    // they are themselves registered via RegisterPhi later.
    Node* input_node = CreateOrReuseNode(*input);
    node->operands.push_back(input_node);
  }
}
// Renders the graph for debugging: one "<id>[, phi][, dead]:" header
// per node followed by one indented line per operand id.
std::string PhiGraph::ToString() {
  std::string out = "PhiGraph: \n";
  for (auto& node : node_storage_) {
    absl::StrAppend(&out, node->value_id);
    if (node->is_phi) {
      absl::StrAppend(&out, ", phi");
    }
    if (node->mark_as_dead) {
      absl::StrAppend(&out, ", dead");
    }
    // Terminate the header line unconditionally. Previously ":\n" was
    // only emitted for dead nodes, so a live node's operand list ran
    // together with its id on a single line.
    absl::StrAppend(&out, ":\n");
    for (Node* input : node->operands) {
      absl::StrAppend(&out, " ", input->value_id, "\n");
    }
  }
  return out;
}
// Simplifies the graph to a fixed point using two rewrites:
//   1. A phi whose operands are all the same node is replaced by that
//      operand.
//   2. A strongly-connected "closure" of phis that reaches exactly one
//      non-phi value is collapsed onto that value.
void PhiGraph::Optimize() {
  VLOG(2) << "Optimizing phi graph:";
  XLA_VLOG_LINES(2, ToString());
  // Build the reverse (user) edges from the operand edges.
  for (auto& node : node_storage_) {
    for (Node* input : node->operands) {
      input->users.push_back(node.get());
    }
  }
  bool changed = true;
  while (changed) {
    changed = false;
    absl::flat_hash_set<Node*> checked_for_closure;
    for (auto& node : node_storage_) {
      if (!node->is_phi) {
        continue;
      }
      if (node->mark_as_dead) {
        continue;
      }
      Node* node_ptr = node.get();
      VLOG(2) << "Optimizing: " << node_ptr->value_id;
      CHECK_GE(node_ptr->operands.size(), 1);
      // Drop self-edges: a phi referring to itself adds no information.
      auto it = absl::c_find(node_ptr->operands, node_ptr);
      while (it != node_ptr->operands.end()) {
        node_ptr->operands.erase(it);
        it = absl::c_find(node_ptr->operands, node_ptr);
      }
      it = absl::c_find(node_ptr->users, node_ptr);
      while (it != node_ptr->users.end()) {
        node_ptr->users.erase(it);
        it = absl::c_find(node_ptr->users, node_ptr);
      }
      CHECK_GE(node_ptr->operands.size(), 1);
      // Rewrite 1: single distinct input -> replace phi with it.
      bool all_inputs_are_same = absl::c_all_of(
          node_ptr->operands,
          [&](Node* elem) { return elem == node_ptr->operands[0]; });
      if (all_inputs_are_same) {
        VLOG(1) << "All inputs to node " << node_ptr->value_id
                << " are the same, replacing it with "
                << node_ptr->operands[0]->value_id;
        ReplaceNodeWith(node_ptr, node_ptr->operands[0]);
        changed = true;
        continue;
      }
      // Each node needs its closure examined at most once per sweep.
      if (checked_for_closure.contains(node_ptr)) {
        continue;
      }
      // Rewrite 2: BFS over operands to find the closure reachable from
      // this phi; if it contains exactly one non-phi value, every phi in
      // the closure must equal that value.
      absl::flat_hash_set<Node*> workset;
      std::queue<Node*> worklist;
      Node* non_phi = nullptr;
      worklist.push(node_ptr);
      while (!worklist.empty()) {
        Node* todo = worklist.front();
        worklist.pop();
        if (workset.contains(todo)) {
          continue;
        }
        checked_for_closure.insert(todo);
        workset.insert(todo);
        for (Node* operand : todo->operands) {
          worklist.push(operand);
        }
        if (!todo->is_phi) {
          if (non_phi != nullptr && non_phi != todo) {
            // Second distinct non-phi: the closure is not collapsible.
            non_phi = nullptr;
            break;
          } else {
            non_phi = todo;
          }
        }
      }
      if (non_phi != nullptr) {
        for (Node* node : workset) {
          if (!node->is_phi) {
            CHECK_EQ(node, non_phi);
            continue;
          }
          VLOG(1) << "Replace node " << node->value_id
                  << " in the closure with node " << non_phi->value_id;
          ReplaceNodeWith(node, non_phi);
          changed = true;
        }
      }
    }
  }
}
} | #include "xla/service/hlo_phi_graph.h"
#include "xla/literal_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Test fixture: manufactures HloValues with increasing ids, all backed
// by one dummy constant instruction.
class PhiGraphTest : public ::testing::Test {
 protected:
  HloValue NewHloValue(bool is_phi) {
    // Monotonic across the whole test binary, so ids never collide.
    static int64_t id = 0;
    return HloValue(id++, dummy_inst_.get(), {}, is_phi);
  }
  void SetUp() override {
    dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f));
  }
  std::unique_ptr<HloInstruction> dummy_inst_;
};
// B = phi(A, B): the self-edge is dropped, leaving B == A.
TEST_F(PhiGraphTest, SelfReferencingPhi) {
  PhiGraph phi_graph;
  HloValue A = NewHloValue(false);
  HloValue B = NewHloValue(true);
  phi_graph.RegisterPhi(B, {&A, &B});
  phi_graph.Optimize();
  EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
// B = phi(A, A): duplicate inputs collapse to A.
TEST_F(PhiGraphTest, PhiWithSameInputs) {
  PhiGraph phi_graph;
  HloValue A = NewHloValue(false);
  HloValue B = NewHloValue(true);
  phi_graph.RegisterPhi(B, {&A, &A});
  phi_graph.Optimize();
  EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));
}
// A cycle of phis reaching a single non-phi D collapses onto D.
TEST_F(PhiGraphTest, CircularPhi) {
  PhiGraph phi_graph;
  HloValue A = NewHloValue(true);
  HloValue B = NewHloValue(true);
  HloValue C = NewHloValue(true);
  HloValue D = NewHloValue(false);
  phi_graph.RegisterPhi(A, {&B, &C});
  phi_graph.RegisterPhi(B, {&D, &C});
  phi_graph.RegisterPhi(C, {&A, &B});
  phi_graph.Optimize();
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
}
// Same as above, but the non-phi is only reached through another phi
// (E = phi(D, D)), requiring iteration to a fixed point.
TEST_F(PhiGraphTest, NestedPhiReduction) {
  PhiGraph phi_graph;
  HloValue A = NewHloValue(true);
  HloValue B = NewHloValue(true);
  HloValue C = NewHloValue(true);
  HloValue D = NewHloValue(false);
  HloValue E = NewHloValue(true);
  phi_graph.RegisterPhi(A, {&B, &C});
  phi_graph.RegisterPhi(B, {&E, &C});
  phi_graph.RegisterPhi(C, {&A, &B});
  phi_graph.RegisterPhi(E, {&D, &D});
  phi_graph.Optimize();
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));
  EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id()));
}
}
} |
76 | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_MEMORY_PROFILE_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_XPLANE_TO_MEMORY_PROFILE_H_
#include <string>
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
int64_t max_num_snapshots = 1000);
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
std::string* json_output);
}
}
#endif
#include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr int64_t kInvalidStepId = -1;
using IndexMetaPair =
std::pair<int64_t , const MemoryActivityMetadata*>;
// True iff `event_type` is the host-event id for a memory allocation.
bool IsMemoryAllocation(int64_t event_type) {
  return HostEventType::kMemoryAllocation == event_type;
}
// True iff `event_type` is the host-event id for a memory deallocation.
bool IsMemoryDeallocation(int64_t event_type) {
  return HostEventType::kMemoryDeallocation == event_type;
}
// Folds one snapshot's aggregate stats into the allocator's summary:
// refreshes the allocator-reported lifetime peak, and, when stack+heap
// usage reaches a new profile-time peak, records the stats, its
// timestamp, and the implied total memory capacity.
void UpdateProfileSummary(const MemoryAggregationStats& stats,
                          int64_t time_offset_ps,
                          MemoryProfileSummary* summary) {
  summary->set_peak_bytes_usage_lifetime(stats.peak_bytes_in_use());
  const int64_t bytes_in_use =
      stats.stack_reserved_bytes() + stats.heap_allocated_bytes();
  MemoryAggregationStats* peak_stats = summary->mutable_peak_stats();
  // >= (not >) so that a later snapshot with equal usage wins the peak.
  if (bytes_in_use >= peak_stats->peak_bytes_in_use()) {
    *peak_stats = stats;
    peak_stats->set_peak_bytes_in_use(bytes_in_use);
    summary->set_peak_stats_time_ps(time_offset_ps);
    summary->set_memory_capacity(bytes_in_use + stats.free_memory_bytes());
  }
}
// Scans every event in the host trace and builds a per-allocator
// MemoryProfile from the memory allocation/deallocation events, updating
// each allocator's running profile summary along the way.
MemoryProfile GenerateMemoryProfile(const XPlane* host_trace) {
  XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
  MemoryProfile memory_profile;
  plane.ForEachLine([&](const XLineVisitor& line) {
    line.ForEachEvent([&](const XEventVisitor& event) {
      int64_t event_type =
          event.Type().value_or(HostEventType::kUnknownHostEventType);
      // Only allocation/deallocation events contribute to the profile.
      if (!(IsMemoryAllocation(event_type) ||
            IsMemoryDeallocation(event_type))) {
        return;
      }
      MemoryAggregationStats stats;
      MemoryActivityMetadata metadata;
      if (IsMemoryAllocation(event_type)) {
        metadata.set_memory_activity(ALLOCATION);
      } else if (IsMemoryDeallocation(event_type)) {
        metadata.set_memory_activity(DEALLOCATION);
      }
      // A real step id (kGroupId stat) may overwrite this sentinel below;
      // otherwise UpdateStepId() infers one later.
      metadata.set_step_id(kInvalidStepId);
      // Key for memory_profile_per_allocator: a device ordinal / host
      // index or the allocator name, whichever stat the event carries.
      std::string memory_id;
      event.ForEachStat([&](const XStatVisitor& stat) {
        if (!stat.Type().has_value()) return;
        switch (stat.Type().value()) {
          case StatType::kIndexOnHost:
          case StatType::kDeviceOrdinal:
            memory_id = absl::StrCat(stat.IntValue());
            break;
          case StatType::kAllocatorName:
            memory_id = std::string(stat.StrOrRefValue());
            break;
          case StatType::kBytesReserved:
            stats.set_stack_reserved_bytes(stat.IntValue());
            break;
          case StatType::kBytesAllocated:
            stats.set_heap_allocated_bytes(stat.IntValue());
            break;
          case StatType::kBytesAvailable:
            stats.set_free_memory_bytes(stat.IntValue());
            break;
          case StatType::kFragmentation:
            stats.set_fragmentation(stat.DoubleValue());
            break;
          case StatType::kPeakBytesInUse:
            stats.set_peak_bytes_in_use(stat.IntValue());
            break;
          case StatType::kRequestedBytes:
            metadata.set_requested_bytes(stat.IntValue());
            break;
          case StatType::kAllocationBytes:
            metadata.set_allocation_bytes(stat.IntValue());
            break;
          case StatType::kAddress:
            metadata.set_address(stat.IntValue());
            break;
          case StatType::kTfOp:
            metadata.set_tf_op_name(std::string(stat.StrOrRefValue()));
            break;
          case StatType::kGroupId:
            metadata.set_step_id(stat.IntValue());
            break;
          case StatType::kRegionType:
            metadata.set_region_type(std::string(stat.StrOrRefValue()));
            break;
          case StatType::kDataType:
            metadata.set_data_type(tensorflow::DataTypeString(
                static_cast<tensorflow::DataType>(stat.IntValue())));
            break;
          case StatType::kTensorShapes:
            metadata.set_tensor_shape(std::string(stat.StrOrRefValue()));
            break;
        }
      });
      // Update the allocator's summary, then append a snapshot of this
      // event's stats/metadata to the same allocator entry.
      MemoryProfileSummary* summary =
          (*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
              .mutable_profile_summary();
      UpdateProfileSummary(stats, event.OffsetPs(), summary);
      MemoryProfileSnapshot* snapshot =
          (*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
              .add_memory_profile_snapshots();
      snapshot->set_time_offset_ps(event.OffsetPs());
      *snapshot->mutable_aggregation_stats() = std::move(stats);
      *snapshot->mutable_activity_metadata() = std::move(metadata);
    });
  });
  return memory_profile;
}
// Replaces kInvalidStepId markers with an inferred step id: one past the
// most recent valid step id seen while scanning snapshots in order.
void UpdateStepId(PerAllocatorMemoryProfile* memory_profile) {
  int64_t last_valid_step_id = -1;
  for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
    DCHECK(snapshot.has_activity_metadata());
    MemoryActivityMetadata* metadata = snapshot.mutable_activity_metadata();
    if (metadata->step_id() == kInvalidStepId) {
      metadata->set_step_id(last_valid_step_id + 1);
    } else {
      last_valid_step_id = metadata->step_id();
    }
  }
}
// Pairs each DEALLOCATION snapshot with the outstanding ALLOCATION at the
// same address (if any) and copies the allocation's descriptive metadata
// (op name, region type, data type, tensor shape) onto it, so that
// deallocations can be attributed to the op that allocated the memory.
void UpdateDeallocation(PerAllocatorMemoryProfile* memory_profile) {
  // Outstanding allocations, keyed by address; entries are erased once a
  // matching deallocation is seen.
  absl::flat_hash_map<uint64 , const MemoryActivityMetadata*>
      addr_metadata_map;
  for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
    uint64 address = snapshot.activity_metadata().address();
    if (snapshot.activity_metadata().memory_activity() == DEALLOCATION) {
      if (addr_metadata_map.contains(address)) {
        const MemoryActivityMetadata* alloc_meta = addr_metadata_map[address];
        snapshot.mutable_activity_metadata()->set_tf_op_name(
            alloc_meta->tf_op_name());
        snapshot.mutable_activity_metadata()->set_region_type(
            alloc_meta->region_type());
        snapshot.mutable_activity_metadata()->set_data_type(
            alloc_meta->data_type());
        snapshot.mutable_activity_metadata()->set_tensor_shape(
            alloc_meta->tensor_shape());
        addr_metadata_map.erase(address);
      } else {
        VLOG(2)
            << "Can't find matching memory allocation for this deallocation: "
            << snapshot.DebugString();
      }
    } else if (!addr_metadata_map.contains(address)) {
      // First (unmatched) allocation at this address: remember it.
      addr_metadata_map[address] = &snapshot.activity_metadata();
    } else {
      // Duplicate allocation at the same address without an intervening
      // deallocation; keep the earlier entry.
      VLOG(2) << "There are two allocations recorded for the same address: "
              << address
              << ". The later allocation event is: " << snapshot.DebugString();
    }
  }
  VLOG(2) << "Number of allocations that cannot find matching dealloctions: "
          << addr_metadata_map.size();
}
// Returns the step id of the last snapshot whose stack+heap usage equals
// `peak_bytes_profile`; returns 0 when no snapshot matches.
int64_t GetPeakMemoryStep(int64_t peak_bytes_profile,
                          const PerAllocatorMemoryProfile* memory_profile) {
  int64_t peak_step_id = 0;
  for (const auto& snapshot : memory_profile->memory_profile_snapshots()) {
    const auto& stats = snapshot.aggregation_stats();
    const int64_t bytes_in_use =
        stats.heap_allocated_bytes() + stats.stack_reserved_bytes();
    if (bytes_in_use == peak_bytes_profile) {
      DCHECK(snapshot.has_activity_metadata());
      peak_step_id = snapshot.activity_metadata().step_id();
    }
  }
  return peak_step_id;
}
// Strict weak ordering for active allocations: larger allocation_bytes
// first, then larger requested_bytes, then lexicographic on op name,
// region type, data type, and tensor shape.
struct MetadataComparator {
  bool operator()(const IndexMetaPair& a, const IndexMetaPair& b) const {
    const MemoryActivityMetadata* a_meta = a.second;
    const MemoryActivityMetadata* b_meta = b.second;
    DCHECK_NE(a_meta, nullptr);
    DCHECK_NE(b_meta, nullptr);
    // Byte counts are negated so bigger allocations compare as smaller,
    // i.e. sort to the front.
    return std::make_tuple(-a_meta->allocation_bytes(),
                           -a_meta->requested_bytes(), a_meta->tf_op_name(),
                           a_meta->region_type(), a_meta->data_type(),
                           a_meta->tensor_shape()) <
           std::make_tuple(-b_meta->allocation_bytes(),
                           -b_meta->requested_bytes(), b_meta->tf_op_name(),
                           b_meta->region_type(), b_meta->data_type(),
                           b_meta->tensor_shape());
  }
};
void InsertSpecialAllocations(int64_t unmapped_allocation_bytes,
int64_t step_id,
PerAllocatorMemoryProfile* memory_profile,
std::vector<IndexMetaPair>* active_allocs) {
int index = 0;
if (unmapped_allocation_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(unmapped_allocation_bytes);
special_allocation->set_allocation_bytes(unmapped_allocation_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("unused preallocated device memory");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("persist/dynamic");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
int64_t stack_bytes =
memory_profile->profile_summary().peak_stats().stack_reserved_bytes();
if (stack_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(stack_bytes);
special_allocation->set_allocation_bytes(stack_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("stack");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("stack");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
}
// Two IndexMetaPairs are "equal" when every compared metadata field
// matches; the snapshot index is intentionally ignored so duplicate
// allocations can be coalesced.
bool operator==(const IndexMetaPair& a, const IndexMetaPair& b) {
  const MemoryActivityMetadata* lhs = a.second;
  const MemoryActivityMetadata* rhs = b.second;
  if (lhs->allocation_bytes() != rhs->allocation_bytes()) return false;
  if (lhs->requested_bytes() != rhs->requested_bytes()) return false;
  if (lhs->tf_op_name() != rhs->tf_op_name()) return false;
  if (lhs->region_type() != rhs->region_type()) return false;
  if (lhs->data_type() != rhs->data_type()) return false;
  return lhs->tensor_shape() == rhs->tensor_shape();
}
// Determines which allocations are still live at the profile-time peak and
// records them (deduplicated, sorted by MetadataComparator) in
// memory_profile->active_allocations(). Also accounts for heap bytes that
// no recorded allocation explains and inserts special allocations for them.
void ProcessActiveAllocations(int64_t peak_bytes_profile_step_id,
                              PerAllocatorMemoryProfile* memory_profile) {
  // Start from the peak heap usage; visible allocations are subtracted
  // below, leaving the "unmapped" remainder.
  int64_t unmapped_allocation_bytes =
      memory_profile->profile_summary().peak_stats().heap_allocated_bytes();
  int64_t unmapped_deallocation_bytes = 0;
  absl::flat_hash_map<int64_t , IndexMetaPair> active_alloc_map;
  for (int i = 0; i < memory_profile->memory_profile_snapshots_size(); i++) {
    const auto& snapshot = memory_profile->memory_profile_snapshots().at(i);
    DCHECK(snapshot.has_activity_metadata());
    const MemoryActivityMetadata& metadata = snapshot.activity_metadata();
    // Snapshots are time-sorted; stop once past the peak timestamp.
    if (snapshot.time_offset_ps() >
        memory_profile->profile_summary().peak_stats_time_ps())
      break;
    // Only events from the peak step participate.
    if (metadata.step_id() != peak_bytes_profile_step_id) continue;
    if (metadata.memory_activity() == ALLOCATION) {
      active_alloc_map[metadata.address()] = {i, &metadata};
      unmapped_allocation_bytes -= metadata.allocation_bytes();
    } else {
      DCHECK_EQ(metadata.memory_activity(), DEALLOCATION);
      if (active_alloc_map.contains(metadata.address())) {
        active_alloc_map.erase(metadata.address());
      } else {
        // Deallocation without a matching in-step allocation.
        unmapped_deallocation_bytes += metadata.allocation_bytes();
      }
      // Freed bytes return to the unexplained pool either way.
      unmapped_allocation_bytes += metadata.allocation_bytes();
    }
  }
  unmapped_allocation_bytes -= unmapped_deallocation_bytes;
  VLOG(2) << "unmapped_allocation_bytes=" << unmapped_allocation_bytes
          << ", unmapped_deallocation_bytes=" << unmapped_deallocation_bytes;
  std::vector<IndexMetaPair> active_allocs;
  for (const auto& address_and_index_meta : active_alloc_map) {
    active_allocs.push_back(address_and_index_meta.second);
  }
  InsertSpecialAllocations(unmapped_allocation_bytes,
                           peak_bytes_profile_step_id, memory_profile,
                           &active_allocs);
  std::sort(active_allocs.begin(), active_allocs.end(), MetadataComparator());
  // Emit one ActiveAllocation per distinct metadata; consecutive equal
  // entries (operator== above) are folded into num_occurrences.
  for (int i = 0, end = active_allocs.size(); i < end; i++) {
    ActiveAllocation* allocation = memory_profile->add_active_allocations();
    allocation->set_snapshot_index(active_allocs[i].first);
    if (active_allocs[i].first < 0) {
      // Negative index => special allocation; map -1,-2,... to 0,1,...
      allocation->set_special_index(-active_allocs[i].first - 1);
    } else {
      allocation->set_special_index(-1);
    }
    allocation->set_num_occurrences(1);
    const int last_alloc = active_allocs.size() - 1;
    while (i < last_alloc && active_allocs[i] == active_allocs[i + 1]) {
      allocation->set_num_occurrences(allocation->num_occurrences() + 1);
      i++;
    }
  }
  VLOG(2) << "Distinctive active allocation count="
          << memory_profile->active_allocations_size();
}
// Compacts `snapshots` to only those referenced by an active allocation
// and renumbers each allocation's snapshot_index to its position in the
// compacted list. Special allocations (negative indices) are untouched.
void SaveActiveAllocationSnapshots(
    protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots,
    protobuf::RepeatedPtrField<ActiveAllocation>* active_allocations) {
  std::vector<MemoryProfileSnapshot*> samples;
  // Collect referenced snapshots and renumber in a single pass: the new
  // index of each kept snapshot is simply its position in `samples`.
  for (auto& allocation : *active_allocations) {
    int64_t orig_index = allocation.snapshot_index();
    if (orig_index < 0) continue;
    samples.push_back(&(*snapshots)[orig_index]);
    allocation.set_snapshot_index(static_cast<int>(samples.size()) - 1);
  }
  protobuf::RepeatedPtrField<MemoryProfileSnapshot> new_snapshots;
  new_snapshots.Reserve(samples.size());
  for (MemoryProfileSnapshot* sample : samples) {
    *new_snapshots.Add() = std::move(*sample);
  }
  *snapshots = std::move(new_snapshots);
}
// Downsamples the snapshot timeline to at most `max_num_snapshots`
// entries by max-pooling: the timeline is split into consecutive windows
// and the snapshot with the highest stack+heap usage in each window is
// kept, so memory peaks survive sampling.
void SampleMemoryProfileTimeline(int64_t max_num_snapshots,
                                 PerAllocatorMemoryProfile* memory_profile) {
  const protobuf::RepeatedPtrField<MemoryProfileSnapshot>& original_snapshots =
      memory_profile->memory_profile_snapshots();
  protobuf::RepeatedPtrField<MemoryProfileSnapshot>* timeline_snapshots =
      memory_profile->mutable_sampled_timeline_snapshots();
  int64_t snapshot_count = original_snapshots.size();
  if (snapshot_count > max_num_snapshots) {
    // Emits `count` windows of `filter_width` snapshots starting at
    // `start`, keeping the max-usage snapshot of each window.
    auto max_box_filter = [&](int filter_width, int count, int start) {
      for (int i = 0; i < count; i++) {
        const MemoryProfileSnapshot* max_snapshot =
            &original_snapshots[start + filter_width * i];
        int64_t max_bytes =
            max_snapshot->aggregation_stats().heap_allocated_bytes() +
            max_snapshot->aggregation_stats().stack_reserved_bytes();
        for (int index = start + filter_width * i + 1;
             index < start + filter_width * (i + 1); index++) {
          int64_t bytes = original_snapshots[index]
                              .aggregation_stats()
                              .heap_allocated_bytes() +
                          original_snapshots[index]
                              .aggregation_stats()
                              .stack_reserved_bytes();
          if (bytes > max_bytes) {
            max_snapshot = &original_snapshots[index];
            max_bytes = bytes;
          }
        }
        *timeline_snapshots->Add() = *max_snapshot;
      }
    };
    // Split into count1 windows of `width` and count2 windows of
    // `width + 1`; count1*width + count2*(width+1) == snapshot_count, so
    // exactly max_num_snapshots samples are produced.
    int width = snapshot_count / max_num_snapshots;
    int count1 = max_num_snapshots * (width + 1) - snapshot_count;
    int count2 = max_num_snapshots - count1;
    max_box_filter(width, count1, 0);
    max_box_filter(width + 1, count2, width * count1);
  } else {
    // Few enough snapshots: keep the full timeline.
    *timeline_snapshots = original_snapshots;
  }
}
// Post-processes a freshly generated MemoryProfile: fills in host/memory
// ids, then for each allocator sorts snapshots by time, repairs step ids,
// matches deallocations, samples the timeline, extracts the allocations
// active at the peak, and compacts the snapshot list to those referenced.
void ProcessMemoryProfileProto(int64_t max_num_snapshots,
                               MemoryProfile* memory_profile) {
  memory_profile->set_num_hosts(1);
  // Record (sorted) ids of allocators that produced at least one snapshot.
  for (const auto& id_and_allocator_profile :
       memory_profile->memory_profile_per_allocator()) {
    if (!id_and_allocator_profile.second.memory_profile_snapshots().empty()) {
      memory_profile->add_memory_ids(id_and_allocator_profile.first);
    }
  }
  absl::c_sort(*memory_profile->mutable_memory_ids());
  for (auto& id_and_allocator_profile :
       *memory_profile->mutable_memory_profile_per_allocator()) {
    PerAllocatorMemoryProfile* allocator_memory_profile =
        &id_and_allocator_profile.second;
    protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots =
        allocator_memory_profile->mutable_memory_profile_snapshots();
    // Time order is assumed by the step-id and peak-search passes below.
    absl::c_sort(*snapshots, [](const MemoryProfileSnapshot& a,
                                const MemoryProfileSnapshot& b) {
      return a.time_offset_ps() < b.time_offset_ps();
    });
    UpdateStepId(allocator_memory_profile);
    UpdateDeallocation(allocator_memory_profile);
    // Sample before SaveActiveAllocationSnapshots moves snapshots away.
    SampleMemoryProfileTimeline(max_num_snapshots, allocator_memory_profile);
    int64_t peak_step_id =
        GetPeakMemoryStep(allocator_memory_profile->profile_summary()
                              .peak_stats()
                              .peak_bytes_in_use(),
                          allocator_memory_profile);
    ProcessActiveAllocations(peak_step_id, allocator_memory_profile);
    SaveActiveAllocationSnapshots(
        snapshots, allocator_memory_profile->mutable_active_allocations());
  }
}
// Serializes `proto_output` as JSON into `*json_output`, including fields
// that hold their default primitive values. Returns Internal on failure.
template <typename Proto>
Status ConvertProtoToJson(const Proto& proto_output, std::string* json_output) {
  protobuf::util::JsonPrintOptions json_options;
  // Emit primitive fields even when set to their defaults, so consumers
  // see a stable set of keys.
  json_options.always_print_primitive_fields = true;
  auto status = protobuf::util::MessageToJsonString(proto_output, json_output,
                                                    json_options);
  if (!status.ok()) {
    // protobuf's status message type differs from absl's; copy it out
    // through an explicit string_view.
    auto error_msg = status.message();
    return errors::Internal(
        "Could not convert proto to JSON string: ",
        absl::string_view(error_msg.data(), error_msg.length()));
  }
  return absl::OkStatus();
}
}
// Public entry point: builds the raw per-allocator profile from the host
// trace, runs all post-processing passes, and stamps the format version.
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
                                           int64_t max_num_snapshots) {
  MemoryProfile profile = GenerateMemoryProfile(&host_plane);
  ProcessMemoryProfileProto(max_num_snapshots, &profile);
  profile.set_version(1);
  return profile;
}
// Converts the host-threads plane of `xspace` (when present) into a
// memory profile and writes it as JSON; a missing host plane is not an
// error and leaves `*json_output` untouched.
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
                                        std::string* json_output) {
  const XPlane* host_plane = FindPlaneWithName(xspace, kHostThreadsPlaneName);
  if (host_plane != nullptr) {
    MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
    TF_RETURN_IF_ERROR(ConvertProtoToJson(memory_profile, json_output));
  }
  return absl::OkStatus();
}
}
}
#include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
#include "tsl/profiler/utils/group_events.h"
namespace tensorflow {
namespace profiler {
namespace {
// End-to-end check: two allocations and one deallocation on a single
// allocator ("GPU_0_bfc") must yield one allocator profile whose peak,
// active allocations, and special allocations match expectations.
TEST(ConvertXPlaneToMemoryProfile, OneAllocatorMultiActivitiesTest) {
  XSpace space;
  XPlane* host_plane = GetOrCreateHostXPlane(&space);
  XPlaneBuilder host_plane_builder(host_plane);
  host_plane_builder.ReserveLines(1);
  auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
  // Allocation of 256 bytes at address 222333 by op "foo/bar".
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
               40000, 1000,
               {{StatType::kBytesReserved, int64_t{2000}},
                {StatType::kBytesAllocated, int64_t{3000}},
                {StatType::kBytesAvailable, int64_t{5000}},
                {StatType::kPeakBytesInUse, int64_t{8500}},
                {StatType::kRequestedBytes, int64_t{200}},
                {StatType::kAllocationBytes, int64_t{256}},
                {StatType::kAddress, int64_t{222333}},
                {StatType::kStepId, int64_t{-93746}},
                {StatType::kDataType, int64_t{1}},
                {StatType::kAllocatorName, "GPU_0_bfc"},
                {StatType::kTfOp, "foo/bar"},
                {StatType::kRegionType, "output"},
                {StatType::kTensorShapes, "[3, 3, 512, 512]"}});
  // Matching deallocation of the same address.
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryDeallocation",
               50000, 1000,
               {{StatType::kBytesReserved, int64_t{2000}},
                {StatType::kBytesAllocated, int64_t{2744}},
                {StatType::kBytesAvailable, int64_t{5256}},
                {StatType::kPeakBytesInUse, int64_t{8500}},
                {StatType::kRequestedBytes, int64_t{200}},
                {StatType::kAllocationBytes, int64_t{256}},
                {StatType::kAddress, int64_t{222333}},
                {StatType::kStepId, int64_t{0}},
                {StatType::kDataType, int64_t{0}},
                {StatType::kAllocatorName, "GPU_0_bfc"},
                {StatType::kRegionType, ""},
                {StatType::kTensorShapes, ""}});
  // Second allocation; drives the peak (heap 5000 + stack 2000 = 7000).
  CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
               70000, 1000,
               {{StatType::kBytesReserved, int64_t{2000}},
                {StatType::kBytesAllocated, int64_t{5000}},
                {StatType::kBytesAvailable, int64_t{3000}},
                {StatType::kPeakBytesInUse, int64_t{9500}},
                {StatType::kRequestedBytes, int64_t{300}},
                {StatType::kAllocationBytes, int64_t{300}},
                {StatType::kAddress, int64_t{345678}},
                {StatType::kStepId, int64_t{-93746}},
                {StatType::kDataType, int64_t{9}},
                {StatType::kAllocatorName, "GPU_0_bfc"},
                {StatType::kTfOp, "mul_grad/Sum"},
                {StatType::kRegionType, "temp"},
                {StatType::kTensorShapes, "[1, 2]"}});
  tsl::profiler::GroupTfEvents(&space);
  MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
  EXPECT_EQ(memory_profile.memory_profile_per_allocator().size(), 1);
  EXPECT_EQ(memory_profile.num_hosts(), 1);
  EXPECT_EQ(memory_profile.memory_ids_size(), 1);
  EXPECT_EQ(memory_profile.memory_profile_per_allocator().begin()->first,
            "GPU_0_bfc");
  EXPECT_EQ(memory_profile.version(), 1);
  const auto& allocator_memory_profile =
      memory_profile.memory_profile_per_allocator().begin()->second;
  EXPECT_EQ(
      allocator_memory_profile.profile_summary().peak_bytes_usage_lifetime(),
      9500);
  EXPECT_EQ(allocator_memory_profile.profile_summary()
                .peak_stats()
                .peak_bytes_in_use(),
            7000);
  EXPECT_EQ(allocator_memory_profile.profile_summary().peak_stats_time_ps(),
            70000);
  EXPECT_EQ(allocator_memory_profile.sampled_timeline_snapshots_size(), 3);
  // Only the snapshot referenced by an active allocation is kept.
  EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots_size(), 1);
  EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots()
                .at(0)
                .activity_metadata()
                .tf_op_name(),
            "mul_grad/Sum");
  EXPECT_EQ(allocator_memory_profile.active_allocations_size(), 3);
  EXPECT_EQ(
      allocator_memory_profile.active_allocations().at(2).snapshot_index(), 0);
  // Two special allocations: unmapped memory and the reserved stack.
  EXPECT_EQ(allocator_memory_profile.special_allocations_size(), 2);
  EXPECT_EQ(allocator_memory_profile.special_allocations().at(1).tf_op_name(),
            "stack");
  EXPECT_EQ(
      allocator_memory_profile.special_allocations().at(1).allocation_bytes(),
      2000);
}
}
}
} |
#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
#include <string>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Each helper throws the corresponding std:: exception when
// ABSL_HAVE_EXCEPTIONS is defined; otherwise it logs the message at FATAL
// severity and aborts (see the definitions in the .cc). The char* overloads
// take the message without requiring a std::string at the call site.
[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
[[noreturn]] void ThrowStdLogicError(const char* what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
[[noreturn]] void ThrowStdDomainError(const char* what_arg);
[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
[[noreturn]] void ThrowStdLengthError(const char* what_arg);
[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRangeError(const char* what_arg);
[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
// No-message variants for exceptions whose what() is fixed.
[[noreturn]] void ThrowStdBadFunctionCall();
[[noreturn]] void ThrowStdBadAlloc();
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/base/internal/throw_delegate.h"
#include <cstdlib>
#include <functional>
#include <new>
#include <stdexcept>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// std::logic_error family. With exceptions enabled each helper throws the
// corresponding exception; otherwise it logs the message at FATAL severity
// and aborts. The char* overloads avoid building a std::string just to
// report an error.
void ThrowStdLogicError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::logic_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdLogicError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::logic_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::invalid_argument(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::invalid_argument(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdDomainError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::domain_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdDomainError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::domain_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdLengthError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::length_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdLengthError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::length_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::out_of_range(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdOutOfRange(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::out_of_range(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// std::runtime_error family: same throw-or-log-and-abort pattern as the
// logic_error helpers above.
void ThrowStdRuntimeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::runtime_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdRuntimeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::runtime_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdRangeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::range_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdRangeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::range_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdOverflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::overflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdOverflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::overflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
void ThrowStdUnderflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::underflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
  std::abort();
#endif
}
void ThrowStdUnderflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::underflow_error(what_arg);
#else
  ABSL_RAW_LOG(FATAL, "%s", what_arg);
  std::abort();
#endif
}
// Message-less exceptions: nothing to log, so the no-exceptions build
// simply aborts.
void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::bad_function_call();
#else
  std::abort();
#endif
}
void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
  throw std::bad_alloc();
#else
  std::abort();
#endif
}
}
ABSL_NAMESPACE_END
}
#include "absl/base/internal/throw_delegate.h"
#include <functional>
#include <new>
#include <stdexcept>
#include "absl/base/config.h"
#include "gtest/gtest.h"
namespace {
using absl::base_internal::ThrowStdLogicError;
using absl::base_internal::ThrowStdInvalidArgument;
using absl::base_internal::ThrowStdDomainError;
using absl::base_internal::ThrowStdLengthError;
using absl::base_internal::ThrowStdOutOfRange;
using absl::base_internal::ThrowStdRuntimeError;
using absl::base_internal::ThrowStdRangeError;
using absl::base_internal::ThrowStdOverflowError;
using absl::base_internal::ThrowStdUnderflowError;
using absl::base_internal::ThrowStdBadFunctionCall;
using absl::base_internal::ThrowStdBadAlloc;
// Shared message; tests assert it round-trips through what().
constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
// Verifies that `f(what_arg)` throws an E whose what() equals the message;
// with exceptions disabled, expects the process to die instead.
template <typename E>
void ExpectThrowChar(void (*f)(const char*)) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f(what_arg);
    FAIL() << "Didn't throw";
  } catch (const E& e) {
    EXPECT_STREQ(e.what(), what_arg);
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
#endif
}
// Same as ExpectThrowChar, but exercises the std::string overloads.
template <typename E>
void ExpectThrowString(void (*f)(const std::string&)) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f(what_arg);
    FAIL() << "Didn't throw";
  } catch (const E& e) {
    EXPECT_STREQ(e.what(), what_arg);
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg);
#endif
}
// For message-less delegates: only the exception type is checked.
template <typename E>
void ExpectThrowNoWhat(void (*f)()) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    f();
    FAIL() << "Didn't throw";
  } catch (const E& e) {
  }
#else
  EXPECT_DEATH_IF_SUPPORTED(f(), "");
#endif
}
// One test per delegate x overload: each must produce the matching std
// exception type carrying what_arg.
TEST(ThrowDelegate, ThrowStdLogicErrorChar) {
  ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
}
TEST(ThrowDelegate, ThrowStdInvalidArgumentChar) {
  ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
}
TEST(ThrowDelegate, ThrowStdDomainErrorChar) {
  ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
}
TEST(ThrowDelegate, ThrowStdLengthErrorChar) {
  ExpectThrowChar<std::length_error>(ThrowStdLengthError);
}
TEST(ThrowDelegate, ThrowStdOutOfRangeChar) {
  ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
}
TEST(ThrowDelegate, ThrowStdRuntimeErrorChar) {
  ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
}
TEST(ThrowDelegate, ThrowStdRangeErrorChar) {
  ExpectThrowChar<std::range_error>(ThrowStdRangeError);
}
TEST(ThrowDelegate, ThrowStdOverflowErrorChar) {
  ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
}
TEST(ThrowDelegate, ThrowStdUnderflowErrorChar) {
  ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
}
TEST(ThrowDelegate, ThrowStdLogicErrorString) {
  ExpectThrowString<std::logic_error>(ThrowStdLogicError);
}
TEST(ThrowDelegate, ThrowStdInvalidArgumentString) {
  ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
}
TEST(ThrowDelegate, ThrowStdDomainErrorString) {
  ExpectThrowString<std::domain_error>(ThrowStdDomainError);
}
TEST(ThrowDelegate, ThrowStdLengthErrorString) {
  ExpectThrowString<std::length_error>(ThrowStdLengthError);
}
TEST(ThrowDelegate, ThrowStdOutOfRangeString) {
  ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
}
TEST(ThrowDelegate, ThrowStdRuntimeErrorString) {
  ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
}
TEST(ThrowDelegate, ThrowStdRangeErrorString) {
  ExpectThrowString<std::range_error>(ThrowStdRangeError);
}
TEST(ThrowDelegate, ThrowStdOverflowErrorString) {
  ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
}
TEST(ThrowDelegate, ThrowStdUnderflowErrorString) {
  ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
}
TEST(ThrowDelegate, ThrowStdBadFunctionCallNoWhat) {
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    ThrowStdBadFunctionCall();
    FAIL() << "Didn't throw";
  } catch (const std::bad_function_call&) {
  }
#ifdef _LIBCPP_VERSION
  // libc++ may throw a different std::exception subtype here; accept it.
  catch (const std::exception&) {
  }
#endif
#else
  EXPECT_DEATH_IF_SUPPORTED(ThrowStdBadFunctionCall(), "");
#endif
}
TEST(ThrowDelegate, ThrowStdBadAllocNoWhat) {
  ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
}
} |
#ifndef STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
#define STORAGE_LEVELDB_HELPERS_MEMENV_MEMENV_H_
#include "leveldb/export.h"
namespace leveldb {
class Env;
// Returns an Env that stores file contents in memory; non-file operations
// are forwarded to `base_env` (the returned Env wraps it).
LEVELDB_EXPORT Env* NewMemEnv(Env* base_env);
}
#endif
#include "helpers/memenv/memenv.h"
#include <cstring>
#include <limits>
#include <map>
#include <string>
#include <vector>
#include "leveldb/env.h"
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"
namespace leveldb {
namespace {
// Reference-counted in-memory file contents, stored as a list of
// fixed-size heap blocks. The ref count and the block list are guarded by
// separate mutexes, so readers do not contend with Ref/Unref.
class FileState {
 public:
  // Starts with zero references; callers take ownership via Ref().
  FileState() : refs_(0), size_(0) {}
  // No copying allowed.
  FileState(const FileState&) = delete;
  FileState& operator=(const FileState&) = delete;
  void Ref() {
    MutexLock lock(&refs_mutex_);
    ++refs_;
  }
  // Drops one reference; deletes the object outside the lock when the
  // count reaches zero (the destructor must not run under refs_mutex_).
  void Unref() {
    bool do_delete = false;
    {
      MutexLock lock(&refs_mutex_);
      --refs_;
      assert(refs_ >= 0);
      if (refs_ <= 0) {
        do_delete = true;
      }
    }
    if (do_delete) {
      delete this;
    }
  }
  uint64_t Size() const {
    MutexLock lock(&blocks_mutex_);
    return size_;
  }
  // Frees all blocks and resets the file to empty.
  void Truncate() {
    MutexLock lock(&blocks_mutex_);
    for (char*& block : blocks_) {
      delete[] block;
    }
    blocks_.clear();
    size_ = 0;
  }
  // Copies up to `n` bytes starting at `offset` into `scratch` and points
  // `*result` at the copied data. Short reads past EOF are not an error.
  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
    MutexLock lock(&blocks_mutex_);
    if (offset > size_) {
      return Status::IOError("Offset greater than file size.");
    }
    const uint64_t available = size_ - offset;
    if (n > available) {
      n = static_cast<size_t>(available);
    }
    if (n == 0) {
      *result = Slice();
      return Status::OK();
    }
    assert(offset / kBlockSize <= std::numeric_limits<size_t>::max());
    size_t block = static_cast<size_t>(offset / kBlockSize);
    size_t block_offset = offset % kBlockSize;
    size_t bytes_to_copy = n;
    char* dst = scratch;
    // Copy block by block; only the first block uses a non-zero offset.
    while (bytes_to_copy > 0) {
      size_t avail = kBlockSize - block_offset;
      if (avail > bytes_to_copy) {
        avail = bytes_to_copy;
      }
      std::memcpy(dst, blocks_[block] + block_offset, avail);
      bytes_to_copy -= avail;
      dst += avail;
      block++;
      block_offset = 0;
    }
    *result = Slice(scratch, n);
    return Status::OK();
  }
  // Appends `data`, filling the partially used last block first and
  // allocating new kBlockSize blocks as needed.
  Status Append(const Slice& data) {
    const char* src = data.data();
    size_t src_len = data.size();
    MutexLock lock(&blocks_mutex_);
    while (src_len > 0) {
      size_t avail;
      size_t offset = size_ % kBlockSize;
      if (offset != 0) {
        // There is room in the last block.
        avail = kBlockSize - offset;
      } else {
        // The last block is full (or the file is empty); start a new one.
        blocks_.push_back(new char[kBlockSize]);
        avail = kBlockSize;
      }
      if (avail > src_len) {
        avail = src_len;
      }
      std::memcpy(blocks_.back() + offset, src, avail);
      src_len -= avail;
      src += avail;
      size_ += avail;
    }
    return Status::OK();
  }
 private:
  enum { kBlockSize = 8 * 1024 };
  // Private: deletion only happens via Unref().
  ~FileState() { Truncate(); }
  port::Mutex refs_mutex_;
  int refs_ GUARDED_BY(refs_mutex_);
  mutable port::Mutex blocks_mutex_;
  std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
  uint64_t size_ GUARDED_BY(blocks_mutex_);
};
// Sequential reader over a FileState. Tracks the current offset and holds a
// reference on the underlying file for its own lifetime.
class SequentialFileImpl : public SequentialFile {
 public:
  explicit SequentialFileImpl(FileState* file) : file_(file), pos_(0) {
    file_->Ref();
  }
  ~SequentialFileImpl() override { file_->Unref(); }

  // Reads up to n bytes from the current position and advances the position
  // by the number of bytes actually read.
  Status Read(size_t n, Slice* result, char* scratch) override {
    Status status = file_->Read(pos_, n, result, scratch);
    if (status.ok()) {
      pos_ += result->size();
    }
    return status;
  }

  // Advances the position by n bytes, clamping at end-of-file.
  Status Skip(uint64_t n) override {
    if (pos_ > file_->Size()) {
      return Status::IOError("pos_ > file_->Size()");
    }
    const uint64_t remaining = file_->Size() - pos_;
    pos_ += (n < remaining) ? n : remaining;
    return Status::OK();
  }

 private:
  FileState* file_;  // reference held from ctor to dtor
  uint64_t pos_;     // current read offset
};
// Random-access reader over a FileState; stateless apart from the file
// reference it holds for its lifetime.
class RandomAccessFileImpl : public RandomAccessFile {
 public:
  explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
  ~RandomAccessFileImpl() override { file_->Unref(); }
  // Delegates directly to FileState::Read; offsets past EOF are errors.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    return file_->Read(offset, n, result, scratch);
  }
 private:
  FileState* file_;
};
// Writer that appends to a FileState, holding a reference for its lifetime.
class WritableFileImpl : public WritableFile {
 public:
  // `explicit` added for consistency with SequentialFileImpl and
  // RandomAccessFileImpl, and to prevent accidental implicit conversion
  // from FileState*. All call sites construct this type directly.
  explicit WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
  ~WritableFileImpl() override { file_->Unref(); }

  Status Append(const Slice& data) override { return file_->Append(data); }

  // All data lives in memory, so Close/Flush/Sync are trivial successes.
  Status Close() override { return Status::OK(); }
  Status Flush() override { return Status::OK(); }
  Status Sync() override { return Status::OK(); }

 private:
  FileState* file_;
};
// Logger implementation that discards all messages.
class NoOpLogger : public Logger {
 public:
  void Logv(const char* format, std::va_list ap) override {}
};
// An Env that keeps every file entirely in memory. File contents live in
// ref-counted FileState objects shared between `file_map_` and any open
// handles, so removing or truncating a file does not invalidate handles
// that are already open.
class InMemoryEnv : public EnvWrapper {
 public:
  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}

  ~InMemoryEnv() override {
    // Drop the map's reference on each file; open handles keep their own.
    for (const auto& kvp : file_map_) {
      kvp.second->Unref();
    }
  }

  Status NewSequentialFile(const std::string& fname,
                           SequentialFile** result) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      *result = nullptr;
      return Status::IOError(fname, "File not found");
    }
    *result = new SequentialFileImpl(file_map_[fname]);
    return Status::OK();
  }

  Status NewRandomAccessFile(const std::string& fname,
                             RandomAccessFile** result) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      *result = nullptr;
      return Status::IOError(fname, "File not found");
    }
    *result = new RandomAccessFileImpl(file_map_[fname]);
    return Status::OK();
  }

  Status NewWritableFile(const std::string& fname,
                         WritableFile** result) override {
    MutexLock lock(&mutex_);
    FileSystem::iterator it = file_map_.find(fname);
    FileState* file;
    if (it == file_map_.end()) {
      // New file: the map owns one reference.
      file = new FileState();
      file->Ref();
      file_map_[fname] = file;
    } else {
      // Existing file: writable open truncates.
      file = it->second;
      file->Truncate();
    }
    *result = new WritableFileImpl(file);
    return Status::OK();
  }

  Status NewAppendableFile(const std::string& fname,
                           WritableFile** result) override {
    MutexLock lock(&mutex_);
    // operator[] value-initializes the entry (to nullptr) for new names.
    FileState** sptr = &file_map_[fname];
    FileState* file = *sptr;
    if (file == nullptr) {
      file = new FileState();
      file->Ref();
      // BUG FIX: store the new FileState in the map. Previously the entry
      // was left null, leaking the FileState and leaving a null pointer
      // that later lookups (GetFileSize, NewSequentialFile, ...) would
      // dereference.
      *sptr = file;
    }
    *result = new WritableFileImpl(file);
    return Status::OK();
  }

  bool FileExists(const std::string& fname) override {
    MutexLock lock(&mutex_);
    return file_map_.find(fname) != file_map_.end();
  }

  // Lists entries under `dir` by prefix match on the full path.
  Status GetChildren(const std::string& dir,
                     std::vector<std::string>* result) override {
    MutexLock lock(&mutex_);
    result->clear();
    for (const auto& kvp : file_map_) {
      const std::string& filename = kvp.first;
      if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
          Slice(filename).starts_with(Slice(dir))) {
        result->push_back(filename.substr(dir.size() + 1));
      }
    }
    return Status::OK();
  }

  void RemoveFileInternal(const std::string& fname)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
    if (file_map_.find(fname) == file_map_.end()) {
      return;
    }
    file_map_[fname]->Unref();
    file_map_.erase(fname);
  }

  Status RemoveFile(const std::string& fname) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      return Status::IOError(fname, "File not found");
    }
    RemoveFileInternal(fname);
    return Status::OK();
  }

  // Directories are implicit in this Env, so these are no-ops.
  Status CreateDir(const std::string& dirname) override { return Status::OK(); }
  Status RemoveDir(const std::string& dirname) override { return Status::OK(); }

  Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(fname) == file_map_.end()) {
      return Status::IOError(fname, "File not found");
    }
    *file_size = file_map_[fname]->Size();
    return Status::OK();
  }

  Status RenameFile(const std::string& src,
                    const std::string& target) override {
    MutexLock lock(&mutex_);
    if (file_map_.find(src) == file_map_.end()) {
      return Status::IOError(src, "File not found");
    }
    // Silently replace an existing target, dropping its reference.
    RemoveFileInternal(target);
    file_map_[target] = file_map_[src];
    file_map_.erase(src);
    return Status::OK();
  }

  // File locking is a formality here; the lock object carries no state.
  Status LockFile(const std::string& fname, FileLock** lock) override {
    *lock = new FileLock;
    return Status::OK();
  }

  Status UnlockFile(FileLock* lock) override {
    delete lock;
    return Status::OK();
  }

  Status GetTestDirectory(std::string* path) override {
    *path = "/test";
    return Status::OK();
  }

  Status NewLogger(const std::string& fname, Logger** result) override {
    *result = new NoOpLogger;
    return Status::OK();
  }

 private:
  // Map from filename to FileState; each mapped FileState carries one
  // reference owned by the map.
  typedef std::map<std::string, FileState*> FileSystem;

  port::Mutex mutex_;
  FileSystem file_map_ GUARDED_BY(mutex_);
};
}
// Returns a heap-allocated in-memory Env wrapping `base_env`; caller owns it.
Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
} | #include "helpers/memenv/memenv.h"
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "util/testutil.h"
namespace leveldb {
// Fixture that owns a fresh in-memory Env for each test.
class MemEnvTest : public testing::Test {
 public:
  MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
  ~MemEnvTest() { delete env_; }
  Env* env_;
};
// Exercises basic file lifecycle: creation, sizing, listing, append,
// rename, and removal.
TEST_F(MemEnvTest, Basics) {
  uint64_t file_size;
  WritableFile* writable_file;
  std::vector<std::string> children;
  ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
  // Non-existent files are invisible everywhere.
  ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
  ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
  ASSERT_EQ(0, children.size());
  // Creating a writable file makes an empty file appear.
  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
  ASSERT_EQ(0, file_size);
  delete writable_file;
  ASSERT_TRUE(env_->FileExists("/dir/f"));
  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
  ASSERT_EQ(0, file_size);
  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
  ASSERT_EQ(1, children.size());
  ASSERT_EQ("f", children[0]);
  // Writable open truncates; appendable open preserves existing data.
  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
  ASSERT_LEVELDB_OK(writable_file->Append("abc"));
  delete writable_file;
  ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file));
  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
  ASSERT_EQ(3, file_size);
  ASSERT_LEVELDB_OK(writable_file->Append("hello"));
  delete writable_file;
  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
  ASSERT_EQ(8, file_size);
  // Rename moves the data; the old name disappears.
  ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
  ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g"));
  ASSERT_TRUE(!env_->FileExists("/dir/f"));
  ASSERT_TRUE(env_->FileExists("/dir/g"));
  ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size));
  ASSERT_EQ(8, file_size);
  // Opening non-existent files for read fails and yields null results.
  SequentialFile* seq_file;
  RandomAccessFile* rand_file;
  ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok());
  ASSERT_TRUE(!seq_file);
  ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok());
  ASSERT_TRUE(!rand_file);
  ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok());
  ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g"));
  ASSERT_TRUE(!env_->FileExists("/dir/g"));
  ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
  ASSERT_EQ(0, children.size());
  ASSERT_LEVELDB_OK(env_->RemoveDir("/dir"));
}
// Covers sequential and random-access reads of written data, including
// reads at and past end-of-file.
TEST_F(MemEnvTest, ReadWrite) {
  WritableFile* writable_file;
  SequentialFile* seq_file;
  RandomAccessFile* rand_file;
  Slice result;
  char scratch[100];
  ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
  ASSERT_LEVELDB_OK(writable_file->Append("hello "));
  ASSERT_LEVELDB_OK(writable_file->Append("world"));
  delete writable_file;
  ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
  ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch));
  ASSERT_EQ(0, result.compare("hello"));
  ASSERT_LEVELDB_OK(seq_file->Skip(1));
  // Oversized read requests clamp at end-of-file.
  ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
  ASSERT_EQ(0, result.compare("world"));
  // Reading at EOF yields an empty slice, not an error.
  ASSERT_LEVELDB_OK(
      seq_file->Read(1000, &result, scratch));
  ASSERT_EQ(0, result.size());
  // Skipping past EOF clamps the position to EOF.
  ASSERT_LEVELDB_OK(seq_file->Skip(100));
  ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
  ASSERT_EQ(0, result.size());
  delete seq_file;
  ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
  ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch));
  ASSERT_EQ(0, result.compare("world"));
  ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch));
  ASSERT_EQ(0, result.compare("hello"));
  ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch));
  ASSERT_EQ(0, result.compare("d"));
  // Random-access reads beyond the file size are errors.
  ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
  delete rand_file;
}
// LockFile/UnlockFile are trivial in this Env but must round-trip cleanly.
TEST_F(MemEnvTest, Locks) {
  FileLock* lock;
  ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock));
  ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
}
// Smoke-tests GetTestDirectory and the trivial WritableFile operations.
TEST_F(MemEnvTest, Misc) {
  std::string test_dir;
  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
  ASSERT_TRUE(!test_dir.empty());
  WritableFile* writable_file;
  ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file));
  // Sync/Flush/Close are no-ops for in-memory files and must succeed.
  ASSERT_LEVELDB_OK(writable_file->Sync());
  ASSERT_LEVELDB_OK(writable_file->Flush());
  ASSERT_LEVELDB_OK(writable_file->Close());
  delete writable_file;
}
// Writes well over one storage block (300 KiB) to exercise multi-block
// files, then reads the data back sequentially and compares.
TEST_F(MemEnvTest, LargeWrite) {
  const size_t kWriteSize = 300 * 1024;
  char* scratch = new char[kWriteSize * 2];
  std::string write_data;
  for (size_t i = 0; i < kWriteSize; ++i) {
    write_data.append(1, static_cast<char>(i));
  }
  WritableFile* writable_file;
  ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
  ASSERT_LEVELDB_OK(writable_file->Append("foo"));
  ASSERT_LEVELDB_OK(writable_file->Append(write_data));
  delete writable_file;
  SequentialFile* seq_file;
  Slice result;
  ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
  ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch));
  ASSERT_EQ(0, result.compare("foo"));
  // Read in whatever chunk sizes the file returns until all data is seen.
  size_t read = 0;
  std::string read_data;
  while (read < kWriteSize) {
    ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch));
    read_data.append(result.data(), result.size());
    read += result.size();
  }
  ASSERT_TRUE(write_data == read_data);
  delete seq_file;
  delete[] scratch;
}
// An already-open RandomAccessFile shares the file's state, so it observes
// data written after it was opened.
TEST_F(MemEnvTest, OverwriteOpenFile) {
  const char kWrite1Data[] = "Write #1 data";
  const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
  const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat";
  ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
  RandomAccessFile* rand_file;
  ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
  // Overwrite the file while rand_file is still open.
  const char kWrite2Data[] = "Write #2 data";
  ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
  Slice result;
  char scratch[kFileDataLen];
  ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
  ASSERT_EQ(0, result.compare(kWrite2Data));
  delete rand_file;
}
// End-to-end check: runs a small leveldb database on top of the in-memory
// Env, covering Put/Get, iteration, and memtable compaction.
TEST_F(MemEnvTest, DBTest) {
  Options options;
  options.create_if_missing = true;
  options.env = env_;
  DB* db;
  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
  ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db));
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i]));
  }
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
  Iterator* iterator = db->NewIterator(ReadOptions());
  iterator->SeekToFirst();
  for (size_t i = 0; i < 3; ++i) {
    ASSERT_TRUE(iterator->Valid());
    ASSERT_TRUE(keys[i] == iterator->key());
    ASSERT_TRUE(vals[i] == iterator->value());
    iterator->Next();
  }
  ASSERT_TRUE(!iterator->Valid());
  delete iterator;
  // Force a memtable compaction and verify reads still succeed from files.
  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
  ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
  for (size_t i = 0; i < 3; ++i) {
    std::string res;
    ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
    ASSERT_TRUE(res == vals[i]);
  }
  delete db;
}
} |
79 | #ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_BIND_PROTO_TO_ACTIVATION_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_BIND_PROTO_TO_ACTIVATION_H_
#include "absl/status/status.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "extensions/protobuf/internal/message.h"
#include "extensions/protobuf/value.h"
#include "internal/status_macros.h"
#include "runtime/activation.h"
#include "google/protobuf/descriptor.h"
namespace cel::extensions {
// Controls how BindProtoToActivation treats fields that are not set on the
// context message.
enum class BindProtoUnsetFieldBehavior {
  // Bind the field's default value.
  kBindDefaultValue,
  // Do not bind the field at all.
  kSkip
};
namespace protobuf_internal {
// Implementation detail: binds each top-level field of `struct_value`
// (described by `descriptor`) as a variable on `activation`.
absl::Status BindProtoToActivation(
    const google::protobuf::Descriptor& descriptor, const StructValue& struct_value,
    ValueManager& value_manager, Activation& activation,
    BindProtoUnsetFieldBehavior unset_field_behavior =
        BindProtoUnsetFieldBehavior::kSkip);
}
// Binds the top-level fields of the proto message `context` as variables on
// `activation`, so CEL expressions can reference them by field name.
// `unset_field_behavior` controls whether unset fields bind their defaults
// or are skipped. Fails for well-known types (which do not convert to
// struct values) and for messages without an available descriptor.
template <typename T>
absl::Status BindProtoToActivation(
    const T& context, ValueManager& value_manager, Activation& activation,
    BindProtoUnsetFieldBehavior unset_field_behavior =
        BindProtoUnsetFieldBehavior::kSkip) {
  static_assert(protobuf_internal::IsProtoMessage<T>);
  CEL_ASSIGN_OR_RETURN(Value parent,
                       ProtoMessageToValue(value_manager, context));
  // Well-known types convert to non-struct CEL values and cannot be bound
  // field-by-field.
  if (!InstanceOf<StructValue>(parent)) {
    return absl::InvalidArgumentError(
        absl::StrCat("context is a well-known type: ", context.GetTypeName()));
  }
  const StructValue& struct_value = Cast<StructValue>(parent);
  const google::protobuf::Descriptor* descriptor = context.GetDescriptor();
  if (descriptor == nullptr) {
    return absl::InvalidArgumentError(
        absl::StrCat("context missing descriptor: ", context.GetTypeName()));
  }
  return protobuf_internal::BindProtoToActivation(*descriptor, struct_value,
                                                  value_manager, activation,
                                                  unset_field_behavior);
}
}
#endif
#include "extensions/protobuf/bind_proto_to_activation.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/value.h"
#include "internal/status_macros.h"
#include "runtime/activation.h"
#include "google/protobuf/descriptor.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::google::protobuf::Descriptor;
// Decides whether `field_desc` should be bound on the activation. Repeated
// fields and the kBindDefaultValue policy always bind; otherwise the field
// is bound only when it is present on the message.
absl::StatusOr<bool> ShouldBindField(
    const google::protobuf::FieldDescriptor* field_desc, const StructValue& struct_value,
    BindProtoUnsetFieldBehavior unset_field_behavior,
    ValueManager& value_manager) {
  const bool always_bind =
      unset_field_behavior == BindProtoUnsetFieldBehavior::kBindDefaultValue ||
      field_desc->is_repeated();
  if (always_bind) {
    return true;
  }
  return struct_value.HasFieldByNumber(field_desc->number());
}
// Looks up the value of `field_desc` on `struct_value`. Unset
// google.protobuf.Any fields are special-cased to null, since an absent Any
// has no payload to unpack.
absl::StatusOr<Value> GetFieldValue(const google::protobuf::FieldDescriptor* field_desc,
                                    const StructValue& struct_value,
                                    ValueManager& value_manager) {
  const bool is_any_field =
      field_desc->cpp_type() == google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE &&
      field_desc->message_type()->well_known_type() ==
          Descriptor::WELLKNOWNTYPE_ANY;
  if (is_any_field) {
    CEL_ASSIGN_OR_RETURN(bool present,
                         struct_value.HasFieldByNumber(field_desc->number()));
    if (!present) {
      return NullValue();
    }
  }
  return struct_value.GetFieldByNumber(value_manager, field_desc->number());
}
}
// Walks every top-level field of `descriptor` and, subject to the
// unset-field policy, binds its value on `activation` under the field name.
absl::Status BindProtoToActivation(
    const Descriptor& descriptor, const StructValue& struct_value,
    ValueManager& value_manager, Activation& activation,
    BindProtoUnsetFieldBehavior unset_field_behavior) {
  const int num_fields = descriptor.field_count();
  for (int field_index = 0; field_index < num_fields; ++field_index) {
    const google::protobuf::FieldDescriptor* field_desc = descriptor.field(field_index);
    CEL_ASSIGN_OR_RETURN(bool should_bind,
                         ShouldBindField(field_desc, struct_value,
                                         unset_field_behavior, value_manager));
    if (!should_bind) {
      continue;
    }
    CEL_ASSIGN_OR_RETURN(
        Value field, GetFieldValue(field_desc, struct_value, value_manager));
    activation.InsertOrAssignValue(field_desc->name(), std::move(field));
  }
  return absl::OkStatus();
}
} | #include "extensions/protobuf/bind_proto_to_activation.h"
#include "google/protobuf/wrappers.pb.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "extensions/protobuf/type_reflector.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using ::cel::test::IntValueIs;
using ::google::api::expr::test::v1::proto2::TestAllTypes;
using testing::Eq;
using testing::HasSubstr;
using testing::Optional;
using cel::internal::IsOkAndHolds;
using cel::internal::StatusIs;
// Fixture parameterized over memory-management modes (see INSTANTIATE below).
class BindProtoToActivationTest
    : public common_internal::ThreadCompatibleValueTest<> {
 public:
  BindProtoToActivationTest() = default;
};
// A set field becomes an activation variable under its field name.
TEST_P(BindProtoToActivationTest, BindProtoToActivation) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_int64"),
              IsOkAndHolds(Optional(IntValueIs(123))));
}
// Well-known types cannot be bound field-by-field and must be rejected.
TEST_P(BindProtoToActivationTest, BindProtoToActivationWktUnsupported) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  google::protobuf::Int64Value int64_value;
  int64_value.set_value(123);
  Activation activation;
  EXPECT_THAT(
      BindProtoToActivation(int64_value, value_factory.get(), activation),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("google.protobuf.Int64Value")));
}
// With kSkip, unset singular fields are not bound at all.
TEST_P(BindProtoToActivationTest, BindProtoToActivationSkip) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(BindProtoToActivation(test_all_types, value_factory.get(),
                                  activation,
                                  BindProtoUnsetFieldBehavior::kSkip));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_int32"),
              IsOkAndHolds(Eq(absl::nullopt)));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_sint32"),
              IsOkAndHolds(Eq(absl::nullopt)));
}
// With kBindDefaultValue, unset fields bind their proto defaults
// (single_int32 has an explicit proto2 default of -32).
TEST_P(BindProtoToActivationTest, BindProtoToActivationDefault) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation,
                            BindProtoUnsetFieldBehavior::kBindDefaultValue));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_int32"),
              IsOkAndHolds(Optional(IntValueIs(-32))));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_sint32"),
              IsOkAndHolds(Optional(IntValueIs(0))));
}
// Unset google.protobuf.Any fields bind as null even under
// kBindDefaultValue, since an empty Any cannot be unpacked.
TEST_P(BindProtoToActivationTest, BindProtoToActivationDefaultAny) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation,
                            BindProtoUnsetFieldBehavior::kBindDefaultValue));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "single_any"),
              IsOkAndHolds(Optional(test::IsNullValue())));
}
// Matches a Value that is a ListValue whose Size() equals `size`.
MATCHER_P(IsListValueOfSize, size, "") {
  const Value& v = arg;
  auto value = As<ListValue>(v);
  if (!value) {
    return false;
  }
  auto s = value->Size();
  return s.ok() && *s == size;
}
// Repeated fields bind as CEL lists.
TEST_P(BindProtoToActivationTest, BindProtoToActivationRepeated) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.add_repeated_int64(123);
  test_all_types.add_repeated_int64(456);
  test_all_types.add_repeated_int64(789);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "repeated_int64"),
              IsOkAndHolds(Optional(IsListValueOfSize(3))));
}
// Unset repeated fields still bind (as empty lists) even under kSkip,
// because repeated fields are always bound.
TEST_P(BindProtoToActivationTest, BindProtoToActivationRepeatedEmpty) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "repeated_int32"),
              IsOkAndHolds(Optional(IsListValueOfSize(0))));
}
// Repeated message fields bind as lists of struct values.
TEST_P(BindProtoToActivationTest, BindProtoToActivationRepeatedComplex) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  auto* nested = test_all_types.add_repeated_nested_message();
  nested->set_bb(123);
  nested = test_all_types.add_repeated_nested_message();
  nested->set_bb(456);
  nested = test_all_types.add_repeated_nested_message();
  nested->set_bb(789);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(
      activation.FindVariable(value_factory.get(), "repeated_nested_message"),
      IsOkAndHolds(Optional(IsListValueOfSize(3))));
}
// Matches a Value that is a MapValue whose Size() equals `size`.
MATCHER_P(IsMapValueOfSize, size, "") {
  const Value& v = arg;
  auto value = As<MapValue>(v);
  if (!value) {
    return false;
  }
  auto s = value->Size();
  return s.ok() && *s == size;
}
// Map fields bind as CEL maps.
TEST_P(BindProtoToActivationTest, BindProtoToActivationMap) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  (*test_all_types.mutable_map_int64_int64())[1] = 2;
  (*test_all_types.mutable_map_int64_int64())[2] = 4;
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "map_int64_int64"),
              IsOkAndHolds(Optional(IsMapValueOfSize(2))));
}
// Unset map fields bind as empty maps (maps are repeated under the hood,
// so they are always bound).
TEST_P(BindProtoToActivationTest, BindProtoToActivationMapEmpty) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  test_all_types.set_single_int64(123);
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "map_int32_int32"),
              IsOkAndHolds(Optional(IsMapValueOfSize(0))));
}
// Maps with message values bind as maps of struct values.
TEST_P(BindProtoToActivationTest, BindProtoToActivationMapComplex) {
  ProtoTypeReflector provider;
  ManagedValueFactory value_factory(provider, memory_manager());
  TestAllTypes test_all_types;
  TestAllTypes::NestedMessage value;
  value.set_bb(42);
  (*test_all_types.mutable_map_int64_message())[1] = value;
  (*test_all_types.mutable_map_int64_message())[2] = value;
  Activation activation;
  ASSERT_OK(
      BindProtoToActivation(test_all_types, value_factory.get(), activation));
  EXPECT_THAT(activation.FindVariable(value_factory.get(), "map_int64_message"),
              IsOkAndHolds(Optional(IsMapValueOfSize(2))));
}
// Run the whole suite under both memory-management modes.
INSTANTIATE_TEST_SUITE_P(Runner, BindProtoToActivationTest,
                         ::testing::Values(MemoryManagement::kReferenceCounting,
                                           MemoryManagement::kPooling));
}
} |
80 | #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_METADATA_CLIENT_H_
#define TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_METADATA_CLIENT_H_
#include "tsl/platform/cloud/http_request.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status.h"
namespace tsl {
// Client for the GCE instance metadata service. Issues HTTP requests via
// the supplied factory and retries transient failures per `config`.
class ComputeEngineMetadataClient {
 public:
  explicit ComputeEngineMetadataClient(
      std::shared_ptr<HttpRequest::Factory> http_request_factory,
      const RetryConfig& config = RetryConfig(
          10000,  /* init_delay_time_us = 10 ms */
          1000000  /* max_delay_time_us = 1 s */
          ));
  virtual ~ComputeEngineMetadataClient() {}
  // Fetches the metadata resource at `path` (relative to the metadata base
  // URL) into `response_buffer`. Virtual to allow mocking in tests.
  virtual Status GetMetadata(const string& path,
                             std::vector<char>* response_buffer);
 private:
  std::shared_ptr<HttpRequest::Factory> http_request_factory_;
  const RetryConfig retry_config_;
  // Not copyable or assignable.
  ComputeEngineMetadataClient(const ComputeEngineMetadataClient&) = delete;
  void operator=(const ComputeEngineMetadataClient&) = delete;
};
}
#endif
#include "tsl/platform/cloud/compute_engine_metadata_client.h"
#include <cstdlib>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tsl/platform/cloud/curl_http_request.h"
namespace tsl {
namespace {
// Environment variable that overrides the metadata server host.
constexpr char kGceMetadataHost[] = "GCE_METADATA_HOST";
// Default base URL of the GCE instance metadata server. The original
// literal was truncated at "http:" (the "//..." tail was lost); restored
// to the documented metadata endpoint.
constexpr char kGceMetadataBaseUrl[] =
    "http://metadata.google.internal/computeMetadata/v1/";
}
// Stores the request factory and retry policy used by GetMetadata.
ComputeEngineMetadataClient::ComputeEngineMetadataClient(
    std::shared_ptr<HttpRequest::Factory> http_request_factory,
    const RetryConfig& config)
    : http_request_factory_(std::move(http_request_factory)),
      retry_config_(config) {}
// Fetches the metadata resource at `path`, retrying transient failures.
// Honors the GCE_METADATA_HOST override; otherwise targets the default
// metadata server. The StrCat literal had been truncated at "http:" (the
// "//" tail was stripped); restored so the override URL is well-formed.
Status ComputeEngineMetadataClient::GetMetadata(
    const string& path, std::vector<char>* response_buffer) {
  const auto get_metadata_from_gce = [path, response_buffer, this]() {
    string metadata_url;
    const char* metadata_url_override = std::getenv(kGceMetadataHost);
    if (metadata_url_override) {
      metadata_url = absl::StrCat("http://", metadata_url_override,
                                  "/computeMetadata/v1/");
    } else {
      metadata_url = kGceMetadataBaseUrl;
    }
    std::unique_ptr<HttpRequest> request(http_request_factory_->Create());
    request->SetUri(metadata_url + path);
    // The metadata server requires this header on every request.
    request->AddHeader("Metadata-Flavor", "Google");
    request->SetResultBuffer(response_buffer);
    TF_RETURN_IF_ERROR(request->Send());
    return OkStatus();
  };
  return RetryingUtils::CallWithRetries(get_metadata_from_gce, retry_config_);
}
} | #include "tsl/platform/cloud/compute_engine_metadata_client.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
// Fixture that clears the GCE_METADATA_HOST override before and after each
// test so tests cannot leak endpoint configuration into one another.
class ComputeEngineMetadataClientTest : public ::testing::Test {
 protected:
  void SetUp() override { ClearEnvVars(); }
  void TearDown() override { ClearEnvVars(); }
  void ClearEnvVars() { unsetenv("GCE_METADATA_HOST"); }
};
// A successful fetch against the default metadata endpoint. The expected
// Uri literal had been truncated at "http:"; restored to the default
// metadata server URL that GetMetadata constructs.
TEST_F(ComputeEngineMetadataClientTest, GetMetadata) {
  const string example_response = "example response";
  std::vector<HttpRequest*> requests({new FakeHttpRequest(
      "Uri: http://metadata.google.internal/computeMetadata/v1/instance"
      "/service-accounts/default/token\n"
      "Header Metadata-Flavor: Google\n",
      example_response)});
  std::shared_ptr<HttpRequest::Factory> http_factory =
      std::make_shared<FakeHttpRequestFactory>(&requests);
  ComputeEngineMetadataClient client(http_factory,
                                     RetryConfig(0 /* init_delay_time_us */));
  std::vector<char> result;
  TF_EXPECT_OK(
      client.GetMetadata("instance/service-accounts/default/token", &result));
  std::vector<char> expected(example_response.begin(), example_response.end());
  EXPECT_EQ(expected, result);
}
// GCE_METADATA_HOST redirects requests to a custom endpoint. The expected
// Uri literal had been truncated at "http:"; restored to the URL built
// from the override host.
TEST_F(ComputeEngineMetadataClientTest, GetCustomMetadataEndpoint) {
  const string example_response = "example response";
  setenv("GCE_METADATA_HOST", "foo.bar", 1);
  std::vector<HttpRequest*> requests(
      {new FakeHttpRequest("Uri: http://foo.bar/computeMetadata/v1/instance"
                           "/service-accounts/default/token\n"
                           "Header Metadata-Flavor: Google\n",
                           example_response)});
  std::shared_ptr<HttpRequest::Factory> http_factory =
      std::make_shared<FakeHttpRequestFactory>(&requests);
  ComputeEngineMetadataClient client(http_factory,
                                     RetryConfig(0 /* init_delay_time_us */));
  std::vector<char> result;
  TF_EXPECT_OK(
      client.GetMetadata("instance/service-accounts/default/token", &result));
  std::vector<char> expected(example_response.begin(), example_response.end());
  EXPECT_EQ(expected, result);
}
// A 503 on the first attempt is retried and the second attempt succeeds.
// Both expected Uri literals had been truncated at "http:"; restored to
// the default metadata server URL.
TEST_F(ComputeEngineMetadataClientTest, RetryOnFailure) {
  const string example_response = "example response";
  std::vector<HttpRequest*> requests(
      {new FakeHttpRequest(
           "Uri: http://metadata.google.internal/computeMetadata/v1/instance"
           "/service-accounts/default/token\n"
           "Header Metadata-Flavor: Google\n",
           "", errors::Unavailable("503"), 503),
       new FakeHttpRequest(
           "Uri: http://metadata.google.internal/computeMetadata/v1/instance"
           "/service-accounts/default/token\n"
           "Header Metadata-Flavor: Google\n",
           example_response)});
  std::shared_ptr<HttpRequest::Factory> http_factory =
      std::make_shared<FakeHttpRequestFactory>(&requests);
  ComputeEngineMetadataClient client(http_factory,
                                     RetryConfig(0 /* init_delay_time_us */));
  std::vector<char> result;
  TF_EXPECT_OK(
      client.GetMetadata("instance/service-accounts/default/token", &result));
  std::vector<char> expected(example_response.begin(), example_response.end());
  EXPECT_EQ(expected, result);
}
} |
81 | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_RMA_DISTRIBUTED_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_COLLECTIVE_RMA_DISTRIBUTED_H_
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
class WorkerCacheInterface;
// Extends CollectiveRemoteAccessLocal with cross-worker receives performed
// via RecvBuf RPCs issued through `worker_cache`.
class CollectiveRemoteAccessDistributed : public CollectiveRemoteAccessLocal {
 public:
  CollectiveRemoteAccessDistributed(
      const DeviceMgr* dev_mgr, DeviceResolverInterface* dev_resolver,
      std::shared_ptr<UnboundedWorkQueue> work_queue,
      WorkerCacheInterface* worker_cache, int64_t step_id, string task_name)
      : CollectiveRemoteAccessLocal(dev_mgr, dev_resolver, step_id),
        worker_cache_(worker_cache),
        work_queue_(std::move(work_queue)),
        task_name_(std::move(task_name)) {}
  ~CollectiveRemoteAccessDistributed() override {}
  // Receives a tensor from `peer_device`. Falls back to the local
  // implementation when `peer_is_local` is true; otherwise issues an RPC.
  void RecvFromPeer(const string& peer_device, const string& peer_task,
                    bool peer_is_local, const string& key, Device* to_device,
                    DeviceContext* to_device_ctx,
                    const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
                    const DeviceLocality& client_locality,
                    int dev_to_dev_stream_index,
                    CancellationManager* cancellation_manager,
                    const StatusCallback& done) override;
  // Checks whether `peer_task` is reachable and healthy.
  void CheckPeerHealth(const string& peer_task, int64_t timeout_in_ms,
                       const StatusCallback& done) override;
  // Aborts in-flight operations with status `s`.
  void StartAbort(const Status& s) override;
 protected:
  WorkerCacheInterface* worker_cache_;  // not owned
  std::shared_ptr<UnboundedWorkQueue> work_queue_;
  // Cancels outstanding RPCs when StartAbort is called.
  CancellationManager abortion_cancel_mgr_;
  string task_name_;
};
}
#endif
#include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
// Wraps a single cancellable RecvBuf RPC to `peer_task`, with the request
// fully populated at construction time.
class RecvBufCall : public CancellableCall {
 public:
  RecvBufCall(int64_t step_id, const string& peer_device,
              const string& peer_task, const string& key, Device* to_device,
              DeviceContext* to_device_ctx,
              const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
              const DeviceLocality& client_locality,
              const DeviceAttributes& server_attributes,
              CancellationManager* cancel_mgr, WorkerCacheInterface* wc)
      : CancellableCall(cancel_mgr, peer_task, wc) {
    req_.set_step_id(step_id);
    req_.set_buf_rendezvous_key(key);
    *req_.mutable_client_locality() = client_locality;
    *req_.mutable_server_locality() = server_attributes.locality();
    req_.set_num_bytes(to_tensor->TotalBytes());
    // Destination buffer address, passed so same-host transports can write
    // directly into the tensor.
    req_.set_buf_ptr(reinterpret_cast<int64_t>(DMAHelper::base(to_tensor)));
    req_.set_src_device(peer_device);
    req_.set_src_incarnation(server_attributes.incarnation());
    req_.set_dst_device(to_device->name());
    req_.set_request_id(GetUniqueRequestId());
  }
  ~RecvBufCall() override {}
  void IssueCall(const StatusCallback& done) override {
    wi_->RecvBufAsync(&opts_, &req_, &resp_, done);
  }
  RecvBufRequest req_;
  RecvBufResponse resp_;
};
// Copies the chunked tensor bytes carried in `extra` into `cpu_tensor`'s
// backing buffer, in chunk order. Assumes the caller has already verified
// that the chunk sizes sum to cpu_tensor->TotalBytes().
void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
                             Tensor* cpu_tensor) {
  char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
  for (const auto& tensor_content_chunk : extra.tensor_content()) {
    // NOTE(review): the std::string conversion copies each chunk before the
    // memcpy; presumably it exists to flatten a possibly non-contiguous
    // (e.g. Cord-backed) bytes field -- confirm before simplifying.
    memcpy(head, std::string(tensor_content_chunk).data(),
           tensor_content_chunk.size());
    head += tensor_content_chunk.size();
  }
}
// Fills `cpu_tensor` from the transport options attached to `response`, if
// any are present. Returns OK (leaving the tensor untouched) when the
// response carries no transport options, and an Internal error when the
// payload size disagrees with the tensor's byte size.
Status PopulateTensorFromResponse(const RecvBufResponse& response,
                                  Tensor* cpu_tensor) {
  if (!response.has_transport_options()) return absl::OkStatus();

  RecvBufRespExtra extra;
  response.transport_options().UnpackTo(&extra);

  // Sum the chunk sizes and check them against the destination tensor.
  const int64_t total_bytes = cpu_tensor->TotalBytes();
  int64_t num_bytes = 0;
  for (const auto& content_chunk : extra.tensor_content()) {
    num_bytes += content_chunk.size();
  }
  if (num_bytes != total_bytes) {
    return errors::Internal("Tensor Size Mismatch: RecvBufResponse returned ",
                            num_bytes,
                            " bytes, expected: ", cpu_tensor->TotalBytes());
  }

  PopulateTensorFromExtra(extra, cpu_tensor);
  return absl::OkStatus();
}
}
// Receives a tensor from a peer device. Local peers are delegated to
// CollectiveRemoteAccessLocal. For remote peers an async RecvBufCall is
// issued; when the destination is an accelerator device the payload is first
// landed in a gpu-compatible host staging tensor and then copied to the
// device via CopyTensor::ViaDMA.
void CollectiveRemoteAccessDistributed::RecvFromPeer(
    const string& peer_device, const string& peer_task, bool peer_is_local,
    const string& key, Device* to_device, DeviceContext* to_device_ctx,
    const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
    const DeviceLocality& client_locality, int dev_to_dev_stream_index,
    CancellationManager* cancellation_manager, const StatusCallback& done) {
  if (peer_is_local) {
    // Same-task transfers need no RPC.
    CollectiveRemoteAccessLocal::RecvFromPeer(
        peer_device, peer_task, peer_is_local, key, to_device, to_device_ctx,
        to_alloc_attr, to_tensor, client_locality, dev_to_dev_stream_index,
        cancellation_manager, done);
    return;
  }

  // Heap-allocated so it survives until the async call chain completes.
  // Every exit path below must delete it exactly once.
  struct State {
    DeviceAttributes server_attributes;
    std::unique_ptr<RecvBufCall> call;
    std::unique_ptr<Tensor> cpu_tensor;
  };
  State* state = new State;

  // Resolve the peer device's attributes (locality, incarnation).
  Status s = dev_resolver_->GetDeviceAttributes(peer_device,
                                                &state->server_attributes);
  if (!s.ok()) {
    delete state;
    done(s);
    return;
  }

  // Pick the receive destination: for accelerator devices, stage in a
  // gpu-compatible host tensor; otherwise receive directly into to_tensor.
  Tensor* dst_tensor = nullptr;
  Device* cpu_dev = nullptr;
  if (to_device->tensorflow_accelerator_device_info()) {
    Status status = dev_mgr_->LookupDevice("CPU:0", &cpu_dev);
    if (!status.ok()) {
      delete state;
      // BUG FIX: previously this reported the stale (OK) `s` instead of the
      // actual lookup failure, so callers observed a successful receive that
      // never happened.
      done(status);
      return;
    }
    AllocatorAttributes cpu_attr;
    cpu_attr.set_gpu_compatible(true);
    tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(
        "CollectiveRemoteAccessDistributed::RecvFromPeer"
        "::recv_buf_callback",
        step_id_, "dynamic", to_tensor->dtype(),
        [to_tensor]() { return to_tensor->shape().DebugString(); });
    state->cpu_tensor =
        std::make_unique<Tensor>(cpu_dev->GetAllocator(cpu_attr),
                                 to_tensor->dtype(), to_tensor->shape());
    dst_tensor = state->cpu_tensor.get();
  } else {
    dst_tensor = to_tensor;
  }

  // Completion handler for the RecvBuf RPC: unpacks an in-response payload if
  // present, then (for accelerators) copies host -> device before signalling.
  auto recv_buf_callback =
      [this, state, to_device, to_alloc_attr, to_device_ctx, to_tensor, cpu_dev,
       dev_to_dev_stream_index, dst_tensor, done](const Status& s) {
        if (s.ok()) {
          Status status =
              PopulateTensorFromResponse(state->call->resp_, dst_tensor);
          if (!status.ok()) {
            delete state;
            done(status);
            return;
          }
          if (to_device->tensorflow_accelerator_device_info()) {
            AllocatorAttributes cpu_attr;
            cpu_attr.set_gpu_compatible(true);
            CopyTensor::ViaDMA("" /*edge_name*/, nullptr /*send_dev_ctx*/,
                               to_device_ctx, cpu_dev, to_device, cpu_attr,
                               to_alloc_attr, dst_tensor, to_tensor,
                               dev_to_dev_stream_index,
                               [this, state, done](const Status& s) {
                                 delete state;
                                 // Invoke `done` via the shared work queue
                                 // rather than inline on the DMA callback.
                                 work_queue_->Schedule([s, done] { done(s); });
                               });
            return;  // `state` is released in the ViaDMA callback above.
          }
        }
        delete state;
        done(s);
      };

  state->call.reset(new RecvBufCall(
      step_id_, peer_device, peer_task, key, to_device, to_device_ctx,
      to_alloc_attr, dst_tensor, client_locality, state->server_attributes,
      cancellation_manager, worker_cache_));

  // Register with the abortion cancellation manager so StartAbort can cancel
  // this in-flight receive.
  CancellationToken abortion_token =
      abortion_cancel_mgr_.get_cancellation_token();
  bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
      abortion_token, [state] { state->call->Cancel(); });
  if (already_aborted) {
    recv_buf_callback(errors::Cancelled("collective ops already aborted"));
  } else {
    state->call->Start([this, abortion_token,
                        done = std::move(recv_buf_callback)](const Status& s) {
      abortion_cancel_mgr_.DeregisterCallback(abortion_token);
      done(s);
    });
  }
}
// Verifies that `peer_task` is reachable and has not restarted since its
// device attributes were cached. Outcomes reported through `done`:
//   - OK for the local task itself, when the peer responds and every cached
//     device incarnation still matches, or when nothing is cached yet;
//   - InvalidArgument when the task name is unknown to the worker cache;
//   - FailedPrecondition when a cached incarnation is missing from the
//     peer's current report, which usually means the peer restarted;
//   - otherwise, the error returned by GetStatusAsync (e.g. Unavailable).
void CollectiveRemoteAccessDistributed::CheckPeerHealth(
    const string& peer_task, int64_t timeout_in_ms,
    const StatusCallback& done) {
  if (peer_task == task_name_) {
    // This task is trivially healthy with respect to itself.
    done(absl::OkStatus());
    return;
  }
  WorkerInterface* wi = worker_cache_->GetOrCreateWorker(peer_task);
  if (wi == nullptr) {
    done(errors::InvalidArgument(peer_task,
                                 " not found. It's probably invalid. The "
                                 "valid form is /job:xxx/replica:0/task:N"));
    return;
  }
  // Manually heap-allocated so they outlive this scope; released in the
  // callback below along with the worker reference.
  auto opts = new CallOptions();
  opts->SetTimeout(timeout_in_ms);
  auto req = new GetStatusRequest();
  auto resp = new GetStatusResponse();
  wi->GetStatusAsync(
      opts, req, resp, /*fail_fast=*/true,
      [this, opts, req, resp, wi, peer_task, done](Status s) {
        std::vector<DeviceAttributes> cached_attrs;
        if (s.ok()) {
          s = dev_resolver_->GetAllDeviceAttributes(peer_task, &cached_attrs);
        }
        if (s.ok()) {
          // Compare the cached incarnations against what the peer reports
          // now; any missing incarnation implies a restart.
          absl::flat_hash_set<uint64> remote_incarnations;
          for (const DeviceAttributes& da : resp->device_attributes()) {
            remote_incarnations.insert(da.incarnation());
          }
          for (const DeviceAttributes& attr : cached_attrs) {
            if (!remote_incarnations.contains(attr.incarnation())) {
              s = errors::FailedPrecondition(
                  attr.name(), " with incarnation ", attr.incarnation(),
                  " is not available. This usually means ", peer_task,
                  " has restarted");
              break;
            }
          }
        } else if (absl::IsNotFound(s)) {
          // No cached attributes for this peer yet: nothing to compare
          // against, so treat the peer as healthy.
          s = absl::OkStatus();
        }
        delete opts;
        delete req;
        delete resp;
        worker_cache_->ReleaseWorker(peer_task, wi);
        done(s);
      });
}
// Aborts the local collective machinery, then cancels every in-flight
// RecvBufCall registered with the abortion cancellation manager.
void CollectiveRemoteAccessDistributed::StartAbort(const Status& s) {
  CollectiveRemoteAccessLocal::StartAbort(s);
  abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_rma_distributed.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/transport_options.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace {
// Minimal test Allocator backed directly by aligned malloc/free.
class FakeAllocator : public Allocator {
 public:
  string Name() override { return "fake"; }
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return port::AlignedMalloc(num_bytes, alignment);
  }
  void DeallocateRaw(void* ptr) override { return port::AlignedFree(ptr); }
};
static std::unique_ptr<Device> NewDevice(const string& type, const string& name,
Allocator* allocator) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, Allocator* allocator)
: Device(nullptr, attr), allocator_(allocator) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return allocator_; }
private:
Allocator* const allocator_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr, allocator);
}
static int64_t kStepId = 123;
// In-process stand-in for a remote worker. Serves GetStatus from its
// DeviceMgr and RecvBuf from a BufRendezvous. When `is_failed` is set, both
// RPCs fail with Unavailable. `set_tensor_in_extra` selects whether RecvBuf
// returns the tensor bytes packed in the response's transport options or
// copied directly to the requester-provided buf_ptr.
class FakeWorker : public TestWorkerInterface {
 public:
  FakeWorker(const string& name, DeviceMgr* dev_mgr,
             DeviceResolverDistributed* dres, bool is_failed,
             bool set_tensor_in_extra)
      : name_(name),
        device_mgr_(dev_mgr),
        device_resolver_(dres),
        buf_rendezvous_(kStepId, dev_mgr),
        is_failed_(is_failed),
        set_tensor_in_extra_(set_tensor_in_extra) {}

  // Producers use this to provide buffers for RecvBufAsync to consume.
  BufRendezvous* buf_rendezvous() { return &buf_rendezvous_; }

  void GetStatusAsync(CallOptions* opts, const GetStatusRequest* request,
                      GetStatusResponse* response, bool fail_fast,
                      StatusCallback done) override {
    if (is_failed_) {
      done(errors::Unavailable("peer down"));
      return;
    }
    // Report every device attribute known to this worker's DeviceMgr.
    std::vector<DeviceAttributes> dev_attr;
    device_mgr_->ListDeviceAttributes(&dev_attr);
    for (const auto& da : dev_attr) {
      *response->add_device_attributes() = da;
    }
    done(absl::OkStatus());
  }

  void RecvBufAsync(CallOptions* opts, const RecvBufRequest* request,
                    RecvBufResponse* response, StatusCallback done) override {
    if (is_failed_) {
      done(errors::Unavailable("peer down"));
      return;
    }
    // On cancellation: abort the rendezvous on a background thread after a
    // short sleep, so cancellation races the transfer like a real network.
    opts->SetCancelCallback([this]() {
      SchedClosure([this]() {
        Env::Default()->SleepForMicroseconds(100);
        buf_rendezvous_.StartAbort(errors::Internal("Cancelled"));
      });
    });
    VLOG(2) << "ConsumeBuf key=" << request->buf_rendezvous_key()
            << " src_device=" << request->src_device()
            << " src_incarnation=" << request->src_incarnation();
    buf_rendezvous_.ConsumeBuf(
        request->buf_rendezvous_key(), request->src_device(),
        request->src_incarnation(),
        [this, opts, request, response, done](const Status& status,
                                              BufRendezvous::Hook* h) {
          Status s = status;
          if (s.ok()) {
            opts->ClearCancelCallback();
            int64_t num_bytes = h->prod_value->TotalBytes();
            if (set_tensor_in_extra_) {
              // Ship the bytes inside the response's transport options.
              RecvBufRespExtra extra;
              extra.add_tensor_content(string(
                  reinterpret_cast<const char*>(DMAHelper::base(h->prod_value)),
                  num_bytes));
              response->mutable_transport_options()->PackFrom(extra);
            } else {
              // Copy directly into the requester's buffer at buf_ptr.
              if (request->num_bytes() != num_bytes) {
                s = errors::Internal("Tensor Size Mismatch.");
              } else {
                memcpy(reinterpret_cast<void*>(request->buf_ptr()),
                       DMAHelper::base(h->prod_value), num_bytes);
              }
            }
          }
          done(s);
          if (h) BufRendezvous::DoneWithHook(h);
        },
        nullptr /*cancellation_manager*/);
  }

 private:
  string name_;
  DeviceMgr* device_mgr_;
  DeviceResolverDistributed* device_resolver_;
  BufRendezvous buf_rendezvous_;
  bool is_failed_;
  const bool set_tensor_in_extra_;
};
// Worker cache stub that resolves device locality by querying the owning
// FakeWorker's GetStatus synchronously.
class FakeCache : public TestWorkerCache {
 public:
  // Always defers to the async path.
  bool GetDeviceLocalityNonBlocking(const string& device,
                                    DeviceLocality* locality) override {
    return false;
  }

  void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
                              StatusCallback done) override {
    // Extract the task portion of the full device name to find the worker.
    string task_name;
    string dev_part;
    if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
      done(errors::Internal("failed to parse device name"));
      return;
    }
    auto it = workers_.find(task_name);
    if (it == workers_.end()) {
      done(errors::Internal("failed to find worker ", task_name));
      return;
    }
    WorkerInterface* wi = it->second;
    GetStatusRequest req;
    GetStatusResponse resp;
    Status status = wi->GetStatus(&req, &resp);
    if (!status.ok()) {
      done(status);
      return;
    }
    // Scan the reported devices for an exact name match.
    for (const auto& it : resp.device_attributes()) {
      if (it.name() == device) {
        *locality = it.locality();
        done(absl::OkStatus());
        return;
      }
    }
    done(errors::Internal("device not found: ", device));
  }
};
// Test axis 1: whether the destination device behaves like a CPU or like an
// accelerator (GPU), toggled via MaybeSetGPUDevice.
enum TEST_PARAM_DEVICE_TYPE {
  TEST_PARAM_DEVICE_TYPE_CPU = 0,
  TEST_PARAM_DEVICE_TYPE_GPU,
};

// Test axis 2: whether the fake worker returns tensor bytes at the request's
// buf_ptr or packed inside the response's transport options.
enum TEST_PARAM_TENSOR_LOC {
  TEST_PARAM_TENSOR_LOC_AT_BUF_PTR = 0,
  TEST_PARAM_TENSOR_LOC_IN_EXTRA,
};
// Parameterized fixture: spins up two fake workers (task 0 and task 1), a
// CollectiveRemoteAccessDistributed owned by task 0, and pre-filled test
// tensors. Parameters select CPU vs. simulated-GPU destination and the
// tensor-return path (buf_ptr vs. transport options).
class CollRMADistTest
    : public ::testing::TestWithParam<
          std::tuple<TEST_PARAM_DEVICE_TYPE, TEST_PARAM_TENSOR_LOC>> {
 protected:
  CollRMADistTest()
      : work_queue_(
            std::make_shared<UnboundedWorkQueue>(Env::Default(), "test")) {}

  ~CollRMADistTest() override {
    // The fixture owns these raw pointers.
    for (DeviceMgr* dm : device_mgrs_) {
      delete dm;
    }
    for (auto it : dev_resolvers_) {
      delete it.second;
    }
    for (FakeWorker* w : workers_) {
      delete w;
    }
  }

  void SetUp() override {
    const int num_workers = 2;
    const int num_devices = 1;
    string device_type = "CPU";
    string dev0_worker_name;
    for (int w = 0; w < num_workers; ++w) {
      string name = strings::StrCat("/job:worker/replica:0/task:", w);
      if (w == 0) {
        dev0_worker_name = name;
      }
      DefineWorker(name, device_type, num_devices);
    }
    // The RMA under test acts as task 0.
    rma_.reset(new CollectiveRemoteAccessDistributed(
        device_mgrs_[0], dev_resolvers_[dev0_worker_name], work_queue_, &wc_,
        kStepId, "/job:worker/replica:0/task:0"));
    // expected_value_: 0..7; to_tensor_: all -1 (so "unchanged" is
    // detectable); large_response_: double-sized, used for the size-mismatch
    // test.
    const int kNumElts = 8;
    expected_value_ = Tensor(DT_FLOAT, {kNumElts});
    to_tensor_ = Tensor(DT_FLOAT, {kNumElts});
    large_response_ = Tensor(DT_FLOAT, {2 * kNumElts});
    auto exp_alias = expected_value_.flat<float>();
    auto to_alias = to_tensor_.flat<float>();
    auto large_response_alias = large_response_.flat<float>();
    for (int i = 0; i < kNumElts; ++i) {
      exp_alias(i) = i;
      to_alias(i) = -1;
    }
    for (int i = 0; i < 2 * kNumElts; ++i) {
      large_response_alias(i) = -2;
    }
  }

  // Pushes every worker's device attributes into every resolver, simulating
  // the normal attribute-exchange step.
  void ResolveDeviceAttributes() {
    for (auto& dev_resolver_item : dev_resolvers_) {
      DeviceResolverDistributed* dev_resolver = dev_resolver_item.second;
      for (const auto& item : dev_by_task_) {
        TF_CHECK_OK(dev_resolver->UpdateDeviceAttributes(item.second));
      }
    }
  }

  // Creates a FakeWorker with `num_devices` devices and registers it with
  // the worker cache and the per-task attribute map.
  void DefineWorker(const string& worker_name, const string& device_type,
                    int num_devices, bool is_failed = false) {
    std::vector<std::unique_ptr<Device>> devices;
    for (int i = 0; i < num_devices; ++i) {
      devices.push_back(NewDevice(
          device_type,
          strings::StrCat(worker_name, "/device:", device_type, ":", i),
          &fake_allocator_));
    }
    DeviceMgr* dev_mgr = new StaticDeviceMgr(std::move(devices));
    device_mgrs_.push_back(dev_mgr);
    std::vector<DeviceAttributes>* dv = &dev_by_task_[worker_name];
    dv->clear();
    for (auto d : dev_mgr->ListDevices()) {
      dv->push_back(d->attributes());
    }
    DeviceResolverDistributed* dev_res = new DeviceResolverDistributed(dev_mgr);
    dev_resolvers_[worker_name] = dev_res;
    FakeWorker* fw =
        new FakeWorker(worker_name, dev_mgr, dev_res, is_failed,
                       std::get<TEST_PARAM_TENSOR_LOC>(GetParam()) ==
                           TEST_PARAM_TENSOR_LOC_IN_EXTRA);
    workers_.push_back(fw);
    wc_.AddWorker(worker_name, fw);
  }

  // Simulates a worker restart: new devices get fresh incarnations, so
  // restart detection should trigger for cached attributes.
  void RestartWorker(const string& worker_name, const string& device_type,
                     int num_devices, bool is_failed = false) {
    auto it = dev_resolvers_.find(worker_name);
    if (it != dev_resolvers_.end()) {
      delete it->second;
      dev_resolvers_.erase(it);
    }
    DefineWorker(worker_name, device_type, num_devices, is_failed);
  }

  // Asserts that to_tensor_ now equals expected_value_ elementwise.
  void ValidateResultTensor() {
    ASSERT_EQ(expected_value_.NumElements(), to_tensor_.NumElements());
    for (int i = 0; i < to_tensor_.NumElements(); ++i) {
      EXPECT_FLOAT_EQ(expected_value_.flat<float>()(i),
                      to_tensor_.flat<float>()(i));
    }
  }

  // Asserts that to_tensor_ still holds its initial fill value (-1).
  void ValidateResultTensorUnchanged() {
    for (int i = 0; i < to_tensor_.NumElements(); ++i) {
      EXPECT_FLOAT_EQ(-1, to_tensor_.flat<float>()(i));
    }
  }

  // Under the GPU parameter, marks the device as an accelerator so
  // RecvFromPeer takes the host-staging + DMA path.
  void MaybeSetGPUDevice(Device* dst_device) {
    if (std::get<TEST_PARAM_DEVICE_TYPE>(GetParam()) ==
        TEST_PARAM_DEVICE_TYPE_GPU) {
      dst_device->set_tensorflow_accelerator_device_info(
          &accelerator_device_info_);
    }
  }

  FakeCache wc_;
  CancellationManager cm_;
  std::vector<DeviceMgr*> device_mgrs_;
  std::unordered_map<string, DeviceResolverDistributed*> dev_resolvers_;
  std::unordered_map<string, std::vector<DeviceAttributes>> dev_by_task_;
  std::shared_ptr<UnboundedWorkQueue> work_queue_;
  std::vector<FakeWorker*> workers_;
  std::unique_ptr<CollectiveRemoteAccessDistributed> rma_;
  mutex mu_;
  // NOTE(review): appears unused (and uninitialized) in the visible tests.
  int num_done_ TF_GUARDED_BY(mu_);
  condition_variable done_;
  CallOptions opts_;
  DeviceLocality device_locality_;
  AllocatorAttributes alloc_attr_;
  FakeAllocator fake_allocator_;
  DeviceBase::AcceleratorDeviceInfo accelerator_device_info_;
  Tensor expected_value_;
  Tensor large_response_;
  Tensor to_tensor_;
};
// Producer provides the buffer before the consumer requests it; the receive
// should complete with the expected values.
TEST_P(CollRMADistTest, ProdFirstOK) {
  ResolveDeviceAttributes();
  Notification consumer_note;
  Notification producer_note;
  Status consumer_status;
  Status producer_status;
  FakeWorker* wi = workers_[1];
  const string kBufKey = "fake_buf_key";
  wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr /*device*/, nullptr /*dev_ctx*/, &expected_value_,
      AllocatorAttributes(),
      [&producer_note, &producer_status](const Status& s) {
        producer_status.Update(s);
        producer_note.Notify();
      },
      nullptr /*cancellation_manager*/);
  Device* dst_device = nullptr;
  string dev_name = "CPU:0";
  TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
  DeviceContext* to_device_ctx = nullptr;
  MaybeSetGPUDevice(dst_device);
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &consumer_note](const Status& s) {
        consumer_status = s;
        consumer_note.Notify();
      });
  consumer_note.WaitForNotification();
  TF_EXPECT_OK(consumer_status);
  producer_note.WaitForNotification();
  TF_EXPECT_OK(producer_status);
  ValidateResultTensor();
}
// Consumer requests the buffer before the producer provides it; the receive
// should still complete once the buffer is produced.
TEST_P(CollRMADistTest, ConsFirstOK) {
  ResolveDeviceAttributes();
  Notification consumer_note;
  Notification producer_note;
  Status consumer_status;
  Status producer_status;
  FakeWorker* wi = workers_[1];
  const string kBufKey = "fake_buf_key";
  Device* dst_device = nullptr;
  string dev_name = "CPU:0";
  TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
  MaybeSetGPUDevice(dst_device);
  DeviceContext* to_device_ctx = nullptr;
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &consumer_note](const Status& s) {
        consumer_status = s;
        consumer_note.Notify();
      });
  wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr /*device*/, nullptr /*dev_ctx*/, &expected_value_,
      AllocatorAttributes(),
      [&producer_note, &producer_status](const Status& s) {
        producer_status.Update(s);
        producer_note.Notify();
      },
      nullptr /*cancellation_manager*/);
  consumer_note.WaitForNotification();
  TF_EXPECT_OK(consumer_status);
  producer_note.WaitForNotification();
  TF_EXPECT_OK(producer_status);
  ValidateResultTensor();
}
// Aborting the RMA while a consumer is waiting (and no producer ever shows
// up) must cancel the pending receive.
TEST_P(CollRMADistTest, ConsFirstAbort) {
  ResolveDeviceAttributes();
  Notification consumer_note;
  Status consumer_status;
  const string kBufKey = "fake_buf_key";
  Device* dst_device = nullptr;
  string dev_name = "CPU:0";
  TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
  MaybeSetGPUDevice(dst_device);
  DeviceContext* to_device_ctx = nullptr;
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &consumer_note](const Status& s) {
        consumer_status = s;
        consumer_note.Notify();
      });
  rma_->StartAbort(errors::Internal("Deliberate Failure"));
  consumer_note.WaitForNotification();
  EXPECT_EQ(consumer_status.message(), "Cancelled");
}
// The producer provides a buffer larger than the consumer expects; the
// receive must fail with a size-mismatch error and leave the destination
// tensor untouched.
TEST_P(CollRMADistTest, ResponseTooLarge) {
  ResolveDeviceAttributes();
  Notification consumer_note;
  Notification producer_note;
  Status consumer_status;
  Status producer_status;
  FakeWorker* wi = workers_[1];
  const string kBufKey = "fake_buf_key";
  wi->buf_rendezvous()->ProvideBuf(
      kBufKey, nullptr /*device*/, nullptr /*dev_ctx*/, &large_response_,
      AllocatorAttributes(),
      [&producer_note, &producer_status](const Status& s) {
        producer_status.Update(s);
        producer_note.Notify();
      },
      nullptr /*cancellation_manager*/);
  Device* dst_device = nullptr;
  string dev_name = "CPU:0";
  TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
  DeviceContext* to_device_ctx = nullptr;
  MaybeSetGPUDevice(dst_device);
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      kBufKey, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &consumer_note](const Status& s) {
        consumer_status = s;
        consumer_note.Notify();
      });
  consumer_note.WaitForNotification();
  EXPECT_THAT(consumer_status.message(),
              ::testing::HasSubstr("Tensor Size Mismatch"));
  producer_note.WaitForNotification();
  TF_EXPECT_OK(producer_status);
  ValidateResultTensorUnchanged();
}
// A successful receive, then the peer restarts (new device incarnations); a
// second receive must fail with FailedPrecondition because the cached
// incarnation no longer matches.
TEST_P(CollRMADistTest, WorkerRestart) {
  ResolveDeviceAttributes();
  Notification consumer_note;
  Notification producer_note;
  Status consumer_status;
  Status producer_status;
  FakeWorker* wi = workers_[1];
  const string buf_key = "fake_buf_key";
  Device* dst_device = nullptr;
  string dev_name = "CPU:0";
  TF_EXPECT_OK(device_mgrs_[0]->LookupDevice(dev_name, &dst_device));
  MaybeSetGPUDevice(dst_device);
  DeviceContext* to_device_ctx = nullptr;
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &consumer_note](const Status& s) {
        consumer_status = s;
        consumer_note.Notify();
      });
  wi->buf_rendezvous()->ProvideBuf(
      buf_key, nullptr /*device*/, nullptr /*dev_ctx*/, &expected_value_,
      AllocatorAttributes(),
      [&producer_note, &producer_status](const Status& s) {
        producer_status.Update(s);
        producer_note.Notify();
      },
      nullptr /*cancellation_manager*/);
  consumer_note.WaitForNotification();
  TF_EXPECT_OK(consumer_status);
  producer_note.WaitForNotification();
  TF_EXPECT_OK(producer_status);
  ValidateResultTensor();
  // Simulate a peer restart and attempt another receive with stale cached
  // attributes.
  RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
  Notification post_restart_note;
  rma_->RecvFromPeer(
      "/job:worker/replica:0/task:1/device:" + dev_name,
      "/job:worker/replica:0/task:1",
      false,  // peer_is_local
      buf_key, dst_device, to_device_ctx, alloc_attr_, &to_tensor_,
      device_locality_, 0 /*stream_index*/,
      nullptr /*cancellation_manager*/,
      [&consumer_status, &post_restart_note](const Status& s) {
        consumer_status = s;
        post_restart_note.Notify();
      });
  post_restart_note.WaitForNotification();
  EXPECT_TRUE(errors::IsFailedPrecondition(consumer_status));
}
// Healthy peer with cached attributes: CheckPeerHealth reports OK.
TEST_P(CollRMADistTest, CheckHealthOKWithCachedAttr) {
  ResolveDeviceAttributes();
  Status check_health_status;
  Notification check_health_done;
  rma_->CheckPeerHealth(
      "/job:worker/replica:0/task:1", 0,
      [&check_health_status, &check_health_done](const Status s) {
        check_health_status = s;
        check_health_done.Notify();
      });
  check_health_done.WaitForNotification();
  TF_EXPECT_OK(check_health_status);
}
// No cached attributes (ResolveDeviceAttributes not called): the NotFound
// from the resolver is treated as healthy, so CheckPeerHealth reports OK.
TEST_P(CollRMADistTest, CheckHealthOKWithoutCachedAttr) {
  Status check_health_status;
  Notification check_health_done;
  rma_->CheckPeerHealth(
      "/job:worker/replica:0/task:1", 0,
      [&check_health_status, &check_health_done](const Status s) {
        check_health_status = s;
        check_health_done.Notify();
      });
  check_health_done.WaitForNotification();
  EXPECT_TRUE(check_health_status.ok());
}
// Peer restarted after attributes were cached: incarnation mismatch must
// surface as FailedPrecondition.
TEST_P(CollRMADistTest, CheckHealthRestarted) {
  ResolveDeviceAttributes();
  RestartWorker("/job:worker/replica:0/task:1", "CPU", 1);
  Status check_health_status;
  Notification check_health_done;
  rma_->CheckPeerHealth(
      "/job:worker/replica:0/task:1", 0,
      [&check_health_status, &check_health_done](const Status s) {
        check_health_status = s;
        check_health_done.Notify();
      });
  check_health_done.WaitForNotification();
  EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
// Peer is down (its RPCs fail): CheckPeerHealth must propagate Unavailable.
TEST_P(CollRMADistTest, CheckHealthFailedPeer) {
  ResolveDeviceAttributes();
  RestartWorker("/job:worker/replica:0/task:1", "CPU", 1,
                /*is_failed=*/true);
  Status check_health_status;
  Notification check_health_done;
  rma_->CheckPeerHealth(
      "/job:worker/replica:0/task:1", 0,
      [&check_health_status, &check_health_done](const Status s) {
        check_health_status = s;
        check_health_done.Notify();
      });
  check_health_done.WaitForNotification();
  EXPECT_TRUE(errors::IsUnavailable(check_health_status));
}
// Peer restarted with a different device set: cached CPU incarnations are
// absent from the new report, so the check fails with FailedPrecondition.
TEST_P(CollRMADistTest, CheckHealthRestartedWithDifferentDevices) {
  ResolveDeviceAttributes();
  RestartWorker("/job:worker/replica:0/task:1", "GPU", 1);
  Status check_health_status;
  Notification check_health_done;
  rma_->CheckPeerHealth(
      "/job:worker/replica:0/task:1", 0,
      [&check_health_status, &check_health_done](const Status s) {
        check_health_status = s;
        check_health_done.Notify();
      });
  check_health_done.WaitForNotification();
  EXPECT_TRUE(errors::IsFailedPrecondition(check_health_status));
}
// Run every test across {buf_ptr, in-extra} x {CPU, GPU}.
// NOTE(review): the Combine order (TENSOR_LOC first) differs from the
// fixture's tuple declaration (DEVICE_TYPE first); the fixture reads params
// by type via std::get<EnumType>, so lookups are order-independent -- but
// confirm gtest accepts this generator/tuple ordering.
INSTANTIATE_TEST_SUITE_P(
    TensorInBufPtrOrExtra, CollRMADistTest,
    ::testing::Combine(::testing::Values(TEST_PARAM_TENSOR_LOC_AT_BUF_PTR,
                                         TEST_PARAM_TENSOR_LOC_IN_EXTRA),
                       ::testing::Values(TEST_PARAM_DEVICE_TYPE_CPU,
                                         TEST_PARAM_DEVICE_TYPE_GPU)));
}
} |
82 | #ifndef THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_INTERNAL_DURATION_H_
#define THIRD_PARTY_CEL_CPP_EXTENSIONS_PROTOBUF_INTERNAL_DURATION_H_
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "google/protobuf/message.h"
namespace cel::extensions::protobuf_internal {

// Reads a google.protobuf.Duration message (generated or dynamic) into an
// absl::Duration. Fails with an internal error if the message does not have
// the expected descriptor shape.
absl::StatusOr<absl::Duration> UnwrapDynamicDurationProto(
    const google::protobuf::Message& message);

// Writes `value` into a google.protobuf.Duration message (generated or
// dynamic).
absl::Status WrapDynamicDurationProto(absl::Duration value,
                                      google::protobuf::Message& message);

}  // namespace cel::extensions::protobuf_internal
#endif
#include "extensions/protobuf/internal/duration.h"
#include <cstdint>
#include "google/protobuf/duration.pb.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/duration_lite.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::extensions::protobuf_internal {
// Converts a google.protobuf.Duration message to absl::Duration.
// Fast path: if the message uses the generated descriptor, downcast and
// delegate. Otherwise validate the dynamic message's `seconds` (int64) and
// `nanos` (int32) singular fields via reflection before reading them.
absl::StatusOr<absl::Duration> UnwrapDynamicDurationProto(
    const google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration");
  const auto* desc = message.GetDescriptor();
  if (ABSL_PREDICT_FALSE(desc == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing descriptor"));
  }
  if (desc == google::protobuf::Duration::descriptor()) {
    // Generated message: no reflection needed.
    return UnwrapGeneratedDurationProto(
        google::protobuf::DownCastToGenerated<google::protobuf::Duration>(message));
  }
  const auto* reflect = message.GetReflection();
  if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing reflection"));
  }
  // Validate the `seconds` field: present, int64, singular.
  const auto* seconds_field =
      desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber);
  if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(), " missing seconds field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
                         google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(), " has unexpected seconds field type: ",
        seconds_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
                         seconds_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " has unexpected ",
                     seconds_field->name(), " field cardinality: REPEATED"));
  }
  // Validate the `nanos` field: present, int32, singular.
  const auto* nanos_field =
      desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber);
  if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
                         google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(),
        " has unexpected nanos field type: ", nanos_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " has unexpected ",
                     nanos_field->name(), " field cardinality: REPEATED"));
  }
  return absl::Seconds(reflect->GetInt64(message, seconds_field)) +
         absl::Nanoseconds(reflect->GetInt32(message, nanos_field));
}
// Writes an absl::Duration into a google.protobuf.Duration message.
// Fast path: generated messages are downcast and delegated. Dynamic messages
// are validated via reflection (same checks as the unwrap direction) and
// then populated field by field.
absl::Status WrapDynamicDurationProto(absl::Duration value,
                                      google::protobuf::Message& message) {
  ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Duration");
  const auto* desc = message.GetDescriptor();
  if (ABSL_PREDICT_FALSE(desc == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing descriptor"));
  }
  if (ABSL_PREDICT_TRUE(desc == google::protobuf::Duration::descriptor())) {
    return WrapGeneratedDurationProto(
        value,
        google::protobuf::DownCastToGenerated<google::protobuf::Duration>(message));
  }
  const auto* reflect = message.GetReflection();
  if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing reflection"));
  }
  // Validate the `seconds` field: present, int64, singular.
  const auto* seconds_field =
      desc->FindFieldByNumber(google::protobuf::Duration::kSecondsFieldNumber);
  if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(), " missing seconds field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
                         google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(), " has unexpected seconds field type: ",
        seconds_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
                         seconds_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " has unexpected ",
                     seconds_field->name(), " field cardinality: REPEATED"));
  }
  // Validate the `nanos` field: present, int32, singular.
  const auto* nanos_field =
      desc->FindFieldByNumber(google::protobuf::Duration::kNanosFieldNumber);
  if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
  }
  if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
                         google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
    return absl::InternalError(absl::StrCat(
        message.GetTypeName(),
        " has unexpected nanos field type: ", nanos_field->cpp_type_name()));
  }
  if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
    return absl::InternalError(
        absl::StrCat(message.GetTypeName(), " has unexpected ",
                     nanos_field->name(), " field cardinality: REPEATED"));
  }
  // IDivDuration returns the whole-second quotient and leaves the remainder
  // in `value`, which is then divided again to produce the nanos part.
  reflect->SetInt64(&message, seconds_field,
                    absl::IDivDuration(value, absl::Seconds(1), &value));
  reflect->SetInt32(&message, nanos_field,
                    static_cast<int32_t>(absl::IDivDuration(
                        value, absl::Nanoseconds(1), &value)));
  return absl::OkStatus();
}
} | #include "extensions/protobuf/internal/duration.h"
#include <memory>
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/duration_lite.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
namespace cel::extensions::protobuf_internal {
namespace {
using testing::Eq;
using cel::internal::IsOkAndHolds;
// A default generated Duration proto unwraps to zero duration.
TEST(Duration, GeneratedFromProto) {
  EXPECT_THAT(UnwrapGeneratedDurationProto(google::protobuf::Duration()),
              IsOkAndHolds(Eq(absl::ZeroDuration())));
}
// A dynamically built (non-generated) Duration message takes the reflection
// path and still unwraps to zero duration.
TEST(Duration, CustomFromProto) {
  google::protobuf::SimpleDescriptorDatabase database;
  {
    google::protobuf::FileDescriptorProto fd;
    google::protobuf::Duration::descriptor()->file()->CopyTo(&fd);
    ASSERT_TRUE(database.Add(fd));
  }
  google::protobuf::DescriptorPool pool(&database);
  pool.AllowUnknownDependencies();
  google::protobuf::DynamicMessageFactory factory(&pool);
  // Force truly dynamic messages so the reflection path is exercised.
  factory.SetDelegateToGeneratedFactory(false);
  EXPECT_THAT(UnwrapDynamicDurationProto(*factory.GetPrototype(
                  pool.FindMessageTypeByName("google.protobuf.Duration"))),
              IsOkAndHolds(Eq(absl::ZeroDuration())));
}
// Wrapping 1s + 2ns into a generated proto splits into seconds=1, nanos=2.
TEST(Duration, GeneratedToProto) {
  google::protobuf::Duration proto;
  ASSERT_OK(WrapGeneratedDurationProto(absl::Seconds(1) + absl::Nanoseconds(2),
                                       proto));
  EXPECT_EQ(proto.seconds(), 1);
  EXPECT_EQ(proto.nanos(), 2);
}
// Wrapping 1s + 2ns through the dynamic/reflection path sets the same field
// values as the generated path.
TEST(Duration, CustomToProto) {
  google::protobuf::SimpleDescriptorDatabase database;
  {
    google::protobuf::FileDescriptorProto fd;
    google::protobuf::Duration::descriptor()->file()->CopyTo(&fd);
    ASSERT_TRUE(database.Add(fd));
  }
  google::protobuf::DescriptorPool pool(&database);
  pool.AllowUnknownDependencies();
  google::protobuf::DynamicMessageFactory factory(&pool);
  // Force truly dynamic messages so the reflection path is exercised.
  factory.SetDelegateToGeneratedFactory(false);
  std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
      factory
          .GetPrototype(pool.FindMessageTypeByName("google.protobuf.Duration"))
          ->New());
  const auto* descriptor = proto->GetDescriptor();
  const auto* reflection = proto->GetReflection();
  const auto* seconds_field = descriptor->FindFieldByName("seconds");
  ASSERT_NE(seconds_field, nullptr);
  const auto* nanos_field = descriptor->FindFieldByName("nanos");
  ASSERT_NE(nanos_field, nullptr);
  ASSERT_OK(WrapDynamicDurationProto(absl::Seconds(1) + absl::Nanoseconds(2),
                                     *proto));
  EXPECT_EQ(reflection->GetInt64(*proto, seconds_field), 1);
  EXPECT_EQ(reflection->GetInt32(*proto, nanos_field), 2);
}
}
} |
83 | #ifndef TENSORFLOW_CORE_TFRT_IFRT_IFRT_EXECUTABLE_REGISTRY_H_
#define TENSORFLOW_CORE_TFRT_IFRT_IFRT_EXECUTABLE_REGISTRY_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
// Process-wide registry mapping program ids to their IfrtServingExecutable.
// All members are static; the class is never instantiated.
class ServingExecutableRegistry {
 public:
  // Move-only RAII token for a registration. Destroying (or Release()-ing)
  // the handle removes the executable from the registry.
  class Handle {
   public:
    Handle();
    Handle(Handle&& other);
    Handle& operator=(Handle&& other);
    Handle(const Handle&) = delete;
    Handle& operator=(const Handle&) = delete;
    ~Handle();
    // Registered program id, or std::nullopt once released or moved-from.
    std::optional<int64_t> program_id() const { return program_id_; }
    // Unregisters the program (if still registered) and disengages the handle.
    void Release();
    // Freezes the registered executable; FailedPrecondition if released.
    absl::Status Freeze();
   private:
    friend class ServingExecutableRegistry;
    explicit Handle(int64_t program_id);
    // Engaged while this handle owns a live registration.
    std::optional<int64_t> program_id_;
  };
  // Registers `executable` under `program_id`; AlreadyExists on duplicates.
  static absl::StatusOr<Handle> Register(
      int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable);
  // Returns the executable for `program_id` (owned by the registry), or
  // nullptr if absent.
  static IfrtServingExecutable* Lookup(int64_t program_id);
 private:
  friend class Handle;
  static absl::Mutex mu_;
  // Heap-allocated and intentionally never freed so it outlives static
  // destruction order.
  static absl::flat_hash_map<int64_t,
                             std::unique_ptr<IfrtServingExecutable>>* const
      executables_ ABSL_GUARDED_BY(&mu_);
};
}
}
#endif
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
// Move constructor: delegates to move assignment; `other` is left disengaged
// so only this handle releases the registration.
ServingExecutableRegistry::Handle::Handle(Handle&& other) {
  *this = std::move(other);
}
// Move assignment: adopts `other`'s registration and disengages `other`.
// Releases any registration this handle currently owns first; otherwise that
// program would stay in the registry forever with no handle left to erase it.
ServingExecutableRegistry::Handle& ServingExecutableRegistry::Handle::operator=(
    Handle&& other) {
  if (this != &other) {
    // Avoid leaking the current registration when overwriting a live handle.
    // Release() is a no-op when program_id_ is disengaged (e.g. when called
    // from the move constructor).
    Release();
    program_id_ = std::move(other.program_id_);
    other.program_id_ = std::nullopt;
  }
  return *this;
}
// Destructor unregisters the program if the handle is still engaged.
ServingExecutableRegistry::Handle::~Handle() { Release(); }
// Freezes the executable owned by this handle.
// Returns FailedPrecondition if the handle was released/moved-from, NotFound
// if the program is no longer present in the registry.
absl::Status ServingExecutableRegistry::Handle::Freeze() {
  if (!program_id_.has_value()) {
    return absl::FailedPreconditionError("Program is not registered");
  }
  absl::MutexLock lock(&ServingExecutableRegistry::mu_);
  auto entry = ServingExecutableRegistry::executables_->find(*program_id_);
  if (entry == ServingExecutableRegistry::executables_->end()) {
    return absl::NotFoundError(
        absl::StrCat("Program ", *program_id_, " not found in the registry"));
  }
  VLOG(1) << "Freeze the program " << *program_id_ << " from signature '"
          << entry->second->signature_name() << "' of model '"
          << entry->second->model_name() << "'";
  entry->second->Freeze();
  return absl::OkStatus();
}
// Removes the owned program from the registry and disengages the handle.
// Safe to call multiple times; subsequent calls are no-ops.
void ServingExecutableRegistry::Handle::Release() {
  if (!program_id_.has_value()) {
    return;
  }
  absl::MutexLock lock(&ServingExecutableRegistry::mu_);
  auto entry = ServingExecutableRegistry::executables_->find(*program_id_);
  if (entry == ServingExecutableRegistry::executables_->end()) {
    LOG(ERROR) << "Program " << *program_id_ << " not found in the registry";
    return;
  }
  VLOG(1) << "Unregistering program " << *program_id_ << " from signature '"
          << entry->second->signature_name() << "' of model '"
          << entry->second->model_name() << "'";
  ServingExecutableRegistry::executables_->erase(entry);
  program_id_ = std::nullopt;
}
// Private constructor used by Register(); creates an engaged handle.
ServingExecutableRegistry::Handle::Handle(int64_t program_id)
    : program_id_(program_id) {}
// Registers `executable` under `program_id` and returns an owning Handle.
// Returns AlreadyExists if a program with the same id is already registered;
// in that case the passed executable is destroyed.
absl::StatusOr<ServingExecutableRegistry::Handle>
ServingExecutableRegistry::Register(
    int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable) {
  absl::MutexLock lock(&mu_);
  VLOG(1) << "Registering program " << program_id << " from signature '"
          << executable->signature_name() << "' of model '"
          << executable->model_name() << "'"
          << ", address is " << executable.get();
  const bool inserted =
      executables_->insert({program_id, std::move(executable)}).second;
  if (!inserted) {
    return absl::AlreadyExistsError(absl::StrCat(
        "Program ", program_id, " already exists in the program registry"));
  }
  return Handle(program_id);
}
// Returns the executable registered under `program_id`, or nullptr if none.
// The pointer remains owned by the registry.
IfrtServingExecutable* ServingExecutableRegistry::Lookup(int64_t program_id) {
  absl::ReaderMutexLock lock(&mu_);
  VLOG(1) << "Looking up program " << program_id;
  auto entry = executables_->find(program_id);
  if (entry == executables_->end()) {
    return nullptr;
  }
  return entry->second.get();
}
// Static storage for the registry. The map is heap-allocated and never freed
// on purpose, so it remains valid regardless of static destruction order.
ABSL_CONST_INIT absl::Mutex ServingExecutableRegistry::mu_(absl::kConstInit);
absl::flat_hash_map<int64_t, std::unique_ptr<IfrtServingExecutable>>* const
    ServingExecutableRegistry::executables_ =
        new absl::flat_hash_map<int64_t,
                                std::unique_ptr<IfrtServingExecutable>>();
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
// Returns a lazily created, process-lifetime thread pool shared by all tests.
const tsl::thread::ThreadPool& GetThreadPool() {
  constexpr int kMaxParallelism = 16;
  // Intentionally leaked so the pool survives until process exit.
  static auto* const thread_pool =
      new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
                                  "IfrtSharding", kMaxParallelism);
  return *thread_pool;
}
// Builds an IfrtServingExecutable for tests from the checked-in
// testdata/executable.mlir module, wiring in a test IFRT client and fresh
// registries.
// NOTE(review): the local registries/work queue/device manager are destroyed
// when this function returns while the executable keeps raw pointers to them;
// tests only exercise registration, so this presumably never dereferences
// them — confirm before reusing this helper elsewhere.
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
CreateIfrtServingExecutable(mlir::MLIRContext& context, int64_t program_id) {
  constexpr absl::string_view kDataDirectory =
      "tensorflow/core/tfrt/ifrt/testdata";
  std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
      absl::StrCat(kDataDirectory, "/executable.mlir"));
  mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
      mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
  if (!mlir_module) {
    return absl::InvalidArgumentError(
        absl::StrCat("Failed to parse MLIR file: ", mlir_module_path));
  }
  TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::Client> client,
                      xla::ifrt::test_util::GetClient());
  IfrtLoadedVariableRegistry ifrt_loaded_variable_registry;
  IfrtRestoreTensorRegistry ifrt_restore_tensor_registry;
  std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
      tfrt::CreateMultiThreadedWorkQueue(
          4, 4);
  TF_ASSIGN_OR_RETURN(std::unique_ptr<tensorflow::StaticDeviceMgr> device_mgr,
                      CreateTfStaticDeviceMgr());
  return IfrtServingExecutable::Create(
      program_id, "test", "main", std::move(mlir_module), client,
      &GetThreadPool(), &ifrt_loaded_variable_registry,
      &ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(),
      tensorflow::IdentityShapeRepresentationFn(),
      nullptr,
      nullptr);
}
// Register then Lookup returns the same executable pointer.
TEST(IfrtExecutableRegistry, Basic) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  int64_t program_id = 1234;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
                          CreateIfrtServingExecutable(context, program_id));
  // Keep the raw pointer before ownership moves into the registry.
  IfrtServingExecutable* raw_ptr = executable.get();
  TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
                                           program_id, std::move(executable)));
  IfrtServingExecutable* executable_ptr =
      ServingExecutableRegistry::Lookup(program_id);
  ASSERT_EQ(executable_ptr, raw_ptr);
}
// Registering a second program under an already-used id fails with
// kAlreadyExists.
TEST(IfrtExecutableRegistry, DuplicateRegistrationFails) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  int64_t program_id = 1234;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
                          CreateIfrtServingExecutable(context, program_id));
  TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
                                           program_id, std::move(executable)));
  // Use a second, freshly created executable for the duplicate attempt. The
  // previous version passed `executable` again after it had been moved from
  // (bugprone-use-after-move): Register() dereferences the pointer for
  // logging, which is UB on the null, moved-from unique_ptr.
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<IfrtServingExecutable> duplicate_executable,
      CreateIfrtServingExecutable(context, program_id));
  EXPECT_THAT(ServingExecutableRegistry::Register(
                  program_id, std::move(duplicate_executable)),
              testing::StatusIs(absl::StatusCode::kAlreadyExists));
}
// Release() removes the program; subsequent Lookup returns nullptr.
TEST(IfrtExecutableRegistry, ReleaseOk) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  int64_t program_id = 1234;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
                          CreateIfrtServingExecutable(context, program_id));
  TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
                                           program_id, std::move(executable)));
  handle.Release();
  EXPECT_EQ(ServingExecutableRegistry::Lookup(program_id), nullptr);
}
// Freeze() succeeds on a live handle and keeps the program registered.
TEST(IfrtExecutableRegistry, FreezeOk) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  int64_t program_id = 1234;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
                          CreateIfrtServingExecutable(context, program_id));
  IfrtServingExecutable* raw_ptr = executable.get();
  TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
                                           program_id, std::move(executable)));
  ASSERT_OK(handle.Freeze());
  IfrtServingExecutable* executable_ptr =
      ServingExecutableRegistry::Lookup(program_id);
  ASSERT_EQ(executable_ptr, raw_ptr);
}
// Freeze() after Release() fails with kFailedPrecondition.
TEST(IfrtExecutableRegistry, FreezeFailedProgramNotRegistered) {
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  mlir::RegisterAllTensorFlowDialects(registry);
  mlir::MLIRContext context(registry);
  int64_t program_id = 1234;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
                          CreateIfrtServingExecutable(context, program_id));
  TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
                                           program_id, std::move(executable)));
  handle.Release();
  EXPECT_THAT(handle.Freeze(),
              testing::StatusIs(absl::StatusCode::kFailedPrecondition));
}
// Lookup of a never-registered id returns nullptr.
TEST(IfrtExecutableRegistry, InvalidProgramIdShallReturnNull) {
  int64_t program_id = 1234;
  IfrtServingExecutable* executable_ptr =
      ServingExecutableRegistry::Lookup(program_id);
  ASSERT_EQ(executable_ptr, nullptr);
}
}
}
} |
84 | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_OPAQUE_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_OPAQUE_TYPE_H_
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/sized_input_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeView;
class OpaqueType;
class OpaqueTypeView;
namespace common_internal {
struct OpaqueTypeData;
}
// Owning handle for an opaque (user-defined) CEL type: a name plus zero or
// more type parameters. Copyable/movable; shares reference-counted data.
class OpaqueType {
 public:
  using view_alternative_type = OpaqueTypeView;
  static constexpr TypeKind kKind = TypeKind::kOpaque;
  explicit OpaqueType(OpaqueTypeView other);
  OpaqueType(MemoryManagerRef memory_manager, absl::string_view name,
             const SizedInputView<TypeView>& parameters);
  OpaqueType() = delete;
  OpaqueType(const OpaqueType&) = default;
  OpaqueType(OpaqueType&&) = default;
  OpaqueType& operator=(const OpaqueType&) = default;
  OpaqueType& operator=(OpaqueType&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  // Type name; the returned view is valid only while this object lives.
  absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
  std::string DebugString() const;
  void swap(OpaqueType& other) noexcept {
    using std::swap;
    swap(data_, other.data_);
  }
  // Type parameters; the span is valid only while this object lives.
  absl::Span<const Type> parameters() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
 private:
  friend class OpaqueTypeView;
  friend struct NativeTypeTraits<OpaqueType>;
  Shared<const common_internal::OpaqueTypeData> data_;
};
// ADL swap, equality, hashing, and streaming support for OpaqueType.
inline void swap(OpaqueType& lhs, OpaqueType& rhs) noexcept { lhs.swap(rhs); }
bool operator==(const OpaqueType& lhs, const OpaqueType& rhs);
inline bool operator!=(const OpaqueType& lhs, const OpaqueType& rhs) {
  return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, const OpaqueType& type);
inline std::ostream& operator<<(std::ostream& out, const OpaqueType& type) {
  return out << type.DebugString();
}
// Native type identity for OpaqueType itself.
template <>
struct NativeTypeTraits<OpaqueType> final {
  static NativeTypeId Id(const OpaqueType&) {
    return NativeTypeId::For<OpaqueType>();
  }
  static bool SkipDestructor(const OpaqueType& type) {
    return NativeType::SkipDestructor(type.data_);
  }
};
// Derived opaque types delegate to the OpaqueType traits above.
template <typename T>
struct NativeTypeTraits<T, std::enable_if_t<std::conjunction_v<
                               std::negation<std::is_same<OpaqueType, T>>,
                               std::is_base_of<OpaqueType, T>>>>
    final {
  static NativeTypeId Id(const T& type) {
    return NativeTypeTraits<OpaqueType>::Id(type);
  }
  static bool SkipDestructor(const T& type) {
    return NativeTypeTraits<OpaqueType>::SkipDestructor(type);
  }
};
// Downcasting from OpaqueType to a derived opaque type. The size/alignment
// checks ensure the cast is a pure re-interpretation of the handle.
template <typename To, typename From>
struct CastTraits<
    To, From,
    std::enable_if_t<std::conjunction_v<
        std::bool_constant<sizeof(To) == sizeof(absl::remove_cvref_t<From>)>,
        std::bool_constant<alignof(To) == alignof(absl::remove_cvref_t<From>)>,
        std::is_same<OpaqueType, absl::remove_cvref_t<From>>,
        std::negation<std::is_same<OpaqueType, To>>,
        std::is_base_of<OpaqueType, To>>>>
    final {
  static bool Compatible(const absl::remove_cvref_t<From>& from) {
    return SubsumptionTraits<To>::IsA(from);
  }
  static decltype(auto) Convert(From from) {
    return SubsumptionTraits<To>::DownCast(std::move(from));
  }
};
// Non-owning view of an OpaqueType. Valid only while the viewed OpaqueType
// is alive; cheap to copy.
class OpaqueTypeView {
 public:
  using alternative_type = OpaqueType;
  static constexpr TypeKind kKind = OpaqueType::kKind;
  OpaqueTypeView(const OpaqueType& type ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept;
  OpaqueTypeView& operator=(
      const OpaqueType& type ABSL_ATTRIBUTE_LIFETIME_BOUND) {
    data_ = type.data_;
    return *this;
  }
  // Binding a view to a temporary would dangle immediately.
  OpaqueTypeView& operator=(OpaqueType&&) = delete;
  OpaqueTypeView() = delete;
  OpaqueTypeView(const OpaqueTypeView&) = default;
  OpaqueTypeView(OpaqueTypeView&&) = default;
  OpaqueTypeView& operator=(const OpaqueTypeView&) = default;
  OpaqueTypeView& operator=(OpaqueTypeView&&) = default;
  constexpr TypeKind kind() const { return kKind; }
  absl::string_view name() const;
  std::string DebugString() const;
  void swap(OpaqueTypeView& other) noexcept {
    using std::swap;
    swap(data_, other.data_);
  }
  absl::Span<const Type> parameters() const;
 private:
  friend class OpaqueType;
  friend struct NativeTypeTraits<OpaqueTypeView>;
  SharedView<const common_internal::OpaqueTypeData> data_;
};
// ADL swap, equality, hashing, and streaming support for OpaqueTypeView.
inline void swap(OpaqueTypeView& lhs, OpaqueTypeView& rhs) noexcept {
  lhs.swap(rhs);
}
bool operator==(OpaqueTypeView lhs, OpaqueTypeView rhs);
inline bool operator!=(OpaqueTypeView lhs, OpaqueTypeView rhs) {
  return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, OpaqueTypeView type);
inline std::ostream& operator<<(std::ostream& out, OpaqueTypeView type) {
  return out << type.DebugString();
}
// Native type identity for OpaqueTypeView itself.
template <>
struct NativeTypeTraits<OpaqueTypeView> final {
  static NativeTypeId Id(OpaqueTypeView type) {
    return NativeTypeId::For<OpaqueTypeView>();
  }
};
// Derived opaque type views delegate to the OpaqueTypeView traits above.
template <typename T>
struct NativeTypeTraits<T, std::enable_if_t<std::conjunction_v<
                               std::negation<std::is_same<OpaqueTypeView, T>>,
                               std::is_base_of<OpaqueTypeView, T>>>>
    final {
  static NativeTypeId Id(const T& type) {
    return NativeTypeTraits<OpaqueTypeView>::Id(type);
  }
};
// Downcasting from OpaqueTypeView to a derived opaque type view; same
// size/alignment constraints as the owning-type cast.
template <typename To, typename From>
struct CastTraits<
    To, From,
    std::enable_if_t<std::conjunction_v<
        std::bool_constant<sizeof(To) == sizeof(absl::remove_cvref_t<From>)>,
        std::bool_constant<alignof(To) == alignof(absl::remove_cvref_t<From>)>,
        std::is_same<OpaqueTypeView, absl::remove_cvref_t<From>>,
        std::negation<std::is_same<OpaqueTypeView, To>>,
        std::is_base_of<OpaqueTypeView, To>>>>
    final {
  static bool Compatible(const absl::remove_cvref_t<From>& from) {
    return SubsumptionTraits<To>::IsA(from);
  }
  static decltype(auto) Convert(From from) {
    return SubsumptionTraits<To>::DownCast(std::move(from));
  }
};
}
#endif
#include <cstddef>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/sized_input_view.h"
#include "common/type.h"
namespace cel {
namespace {
// Renders "name" or "name<p1, p2, ...>" for an opaque type's debug output.
std::string OpaqueDebugString(absl::string_view name,
                              absl::Span<const Type> parameters) {
  if (parameters.empty()) {
    return std::string(name);
  }
  std::string joined_parameters =
      absl::StrJoin(parameters, ", ", absl::StreamFormatter());
  return absl::StrCat(name, "<", joined_parameters, ">");
}
// Materializes the type parameters of a SizedInputView into an owned
// FixedArray of Type.
absl::FixedArray<Type, 1> SizedInputViewToFixedArray(
    const SizedInputView<TypeView>& parameters) {
  absl::FixedArray<Type, 1> result(parameters.size());
  size_t i = 0;
  for (const auto& parameter : parameters) {
    result[i] = Type(parameter);
    ++i;
  }
  // The view promises its size; verify we consumed exactly that many.
  ABSL_DCHECK_EQ(i, parameters.size());
  return result;
}
}
// Constructs an opaque type, copying `name` and materializing `parameters`
// into refcounted storage owned by `memory_manager`.
// Note: the previous std::move(parameters) was a no-op — moving a const
// reference still copies (clang-tidy: performance-move-const-arg) — so it is
// dropped for clarity.
OpaqueType::OpaqueType(MemoryManagerRef memory_manager, absl::string_view name,
                       const SizedInputView<TypeView>& parameters)
    : data_(memory_manager.MakeShared<common_internal::OpaqueTypeData>(
          std::string(name), SizedInputViewToFixedArray(parameters))) {}
// Debug representation, e.g. "test.Opaque<bytes>".
std::string OpaqueType::DebugString() const {
  return OpaqueDebugString(name(), parameters());
}
// Debug representation; identical format to OpaqueType::DebugString().
std::string OpaqueTypeView::DebugString() const {
  return OpaqueDebugString(name(), parameters());
}
} | #include <sstream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/native_type.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using testing::TestParamInfo;
using testing::TestWithParam;
// Parameterized over pooling vs. reference-counting memory management.
class OpaqueTypeTest : public common_internal::ThreadCompatibleMemoryTest<> {};
// kind() is kOpaque both directly and through the Type wrapper.
TEST_P(OpaqueTypeTest, Kind) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}).kind(),
            OpaqueType::kKind);
  EXPECT_EQ(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})).kind(),
      OpaqueType::kKind);
}
TEST_P(OpaqueTypeTest, Name) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}).name(),
            "test.Opaque");
  EXPECT_EQ(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})).name(),
      "test.Opaque");
}
// DebugString renders with angle-bracketed parameters, or bare name when
// there are none.
TEST_P(OpaqueTypeTest, DebugString) {
  {
    std::ostringstream out;
    out << OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << OpaqueType(memory_manager(), "test.Opaque", {});
    EXPECT_EQ(out.str(), "test.Opaque");
  }
}
TEST_P(OpaqueTypeTest, Hash) {
  EXPECT_EQ(
      absl::HashOf(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
      absl::HashOf(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
}
// Equality holds across all combinations of OpaqueType and Type wrappers.
TEST_P(OpaqueTypeTest, Equal) {
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}),
            OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
  EXPECT_EQ(Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            OpaqueType(memory_manager(), "test.Opaque", {BytesType()}));
  EXPECT_EQ(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}),
            Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
  EXPECT_EQ(Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
}
TEST_P(OpaqueTypeTest, NativeTypeId) {
  EXPECT_EQ(NativeTypeId::Of(
                OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
            NativeTypeId::For<OpaqueType>());
  EXPECT_EQ(NativeTypeId::Of(Type(
                OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
            NativeTypeId::For<OpaqueType>());
}
TEST_P(OpaqueTypeTest, InstanceOf) {
  EXPECT_TRUE(InstanceOf<OpaqueType>(
      OpaqueType(memory_manager(), "test.Opaque", {BytesType()})));
  EXPECT_TRUE(InstanceOf<OpaqueType>(
      Type(OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))));
}
TEST_P(OpaqueTypeTest, Cast) {
  EXPECT_THAT(Cast<OpaqueType>(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
              An<OpaqueType>());
  EXPECT_THAT(Cast<OpaqueType>(Type(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
              An<OpaqueType>());
}
TEST_P(OpaqueTypeTest, As) {
  EXPECT_THAT(As<OpaqueType>(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()})),
              Ne(absl::nullopt));
  EXPECT_THAT(As<OpaqueType>(Type(
                  OpaqueType(memory_manager(), "test.Opaque", {BytesType()}))),
              Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
    OpaqueTypeTest, OpaqueTypeTest,
    ::testing::Values(MemoryManagement::kPooling,
                      MemoryManagement::kReferenceCounting),
    OpaqueTypeTest::ToString);
// Parameterized over pooling vs. reference-counting memory management.
class OpaqueTypeViewTest
    : public common_internal::ThreadCompatibleMemoryTest<> {};
TEST_P(OpaqueTypeViewTest, Kind) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type).kind(), OpaqueTypeView::kKind);
  EXPECT_EQ(TypeView(OpaqueTypeView(type)).kind(), OpaqueTypeView::kKind);
}
TEST_P(OpaqueTypeViewTest, Name) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type).name(), "test.Opaque");
  EXPECT_EQ(TypeView(OpaqueTypeView(type)).name(), "test.Opaque");
}
TEST_P(OpaqueTypeViewTest, DebugString) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  {
    std::ostringstream out;
    out << OpaqueTypeView(type);
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
  {
    std::ostringstream out;
    out << TypeView(OpaqueTypeView(type));
    EXPECT_EQ(out.str(), "test.Opaque<bytes>");
  }
}
// Views hash identically to each other and to the owning type.
TEST_P(OpaqueTypeViewTest, Hash) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(absl::HashOf(OpaqueTypeView(type)),
            absl::HashOf(OpaqueTypeView(type)));
  EXPECT_EQ(absl::HashOf(OpaqueTypeView(type)), absl::HashOf(OpaqueType(type)));
}
// Equality across every combination of view, wrapped view, owning type, and
// wrapped owning type.
// NOTE(review): the two consecutive OpaqueType-vs-OpaqueTypeView assertions
// below are exact duplicates — one was likely intended to be a different
// combination; confirm against the upstream suite.
TEST_P(OpaqueTypeViewTest, Equal) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(OpaqueTypeView(type), OpaqueTypeView(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), OpaqueTypeView(type));
  EXPECT_EQ(OpaqueTypeView(type), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(OpaqueTypeView(type), OpaqueType(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), OpaqueType(type));
  EXPECT_EQ(TypeView(OpaqueTypeView(type)), Type(OpaqueType(type)));
  EXPECT_EQ(OpaqueType(type), OpaqueTypeView(type));
  EXPECT_EQ(OpaqueType(type), OpaqueTypeView(type));
  EXPECT_EQ(OpaqueType(type), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(Type(OpaqueType(type)), TypeView(OpaqueTypeView(type)));
  EXPECT_EQ(OpaqueTypeView(type), OpaqueType(type));
}
TEST_P(OpaqueTypeViewTest, NativeTypeId) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_EQ(NativeTypeId::Of(OpaqueTypeView(type)),
            NativeTypeId::For<OpaqueTypeView>());
  EXPECT_EQ(NativeTypeId::Of(TypeView(OpaqueTypeView(type))),
            NativeTypeId::For<OpaqueTypeView>());
}
TEST_P(OpaqueTypeViewTest, InstanceOf) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_TRUE(InstanceOf<OpaqueTypeView>(OpaqueTypeView(type)));
  EXPECT_TRUE(InstanceOf<OpaqueTypeView>(TypeView(OpaqueTypeView(type))));
}
TEST_P(OpaqueTypeViewTest, Cast) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_THAT(Cast<OpaqueTypeView>(OpaqueTypeView(type)), An<OpaqueTypeView>());
  EXPECT_THAT(Cast<OpaqueTypeView>(TypeView(OpaqueTypeView(type))),
              An<OpaqueTypeView>());
}
TEST_P(OpaqueTypeViewTest, As) {
  auto type = OpaqueType(memory_manager(), "test.Opaque", {BytesType()});
  EXPECT_THAT(As<OpaqueTypeView>(OpaqueTypeView(type)), Ne(absl::nullopt));
  EXPECT_THAT(As<OpaqueTypeView>(TypeView(OpaqueTypeView(type))),
              Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
    OpaqueTypeViewTest, OpaqueTypeViewTest,
    ::testing::Values(MemoryManagement::kPooling,
                      MemoryManagement::kReferenceCounting),
    OpaqueTypeViewTest::ToString);
}
} |
85 | #ifndef TENSORFLOW_CORE_IR_UTILITY_H_
#define TENSORFLOW_CORE_IR_UTILITY_H_
#include <optional>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {
Block::BlockArgListType GetLoopRegionDataArgs(Region ®ion);
Block::BlockArgListType GetLoopRegionControlTokens(Region ®ion);
BlockArgument GetLoopRegionControlOf(BlockArgument data);
BlockArgument GetLoopRegionDataOf(BlockArgument ctl);
Value LookupControlDependency(Value data);
std::optional<Value> LookupDataValue(Value ctl);
// Splits `values` into (data values, control values). Control tokens are
// assumed to be a contiguous suffix of the range; the split point is found by
// counting trailing values of control type.
template <typename RangeT>
std::pair<RangeT, RangeT> SplitDataAndControlValues(RangeT values,
                                                    ControlType ctl_type) {
  unsigned trailing_ctl = 0;
  for (Value value : llvm::reverse(values)) {
    if (value.getType() != ctl_type) break;
    ++trailing_ctl;
  }
  unsigned num_data = llvm::size(values) - trailing_ctl;
  return std::make_pair(values.slice(0, num_data),
                        values.slice(num_data, trailing_ctl));
}
}
}
#endif
#include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/Block.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
// Loop regions carry N data arguments followed by N control tokens; this
// returns the leading data half.
Block::BlockArgListType GetLoopRegionDataArgs(Region &region) {
  Block::BlockArgListType all_args = region.getArguments();
  return all_args.drop_back(all_args.size() / 2);
}
// Returns the trailing control-token half of a loop region's arguments.
Block::BlockArgListType GetLoopRegionControlTokens(Region &region) {
  Block::BlockArgListType all_args = region.getArguments();
  return all_args.drop_front(all_args.size() / 2);
}
// Maps a data argument to its paired control token, which lives half the
// argument list later.
BlockArgument GetLoopRegionControlOf(BlockArgument data) {
  Block &owner = *data.getOwner();
  unsigned half = owner.getNumArguments() / 2;
  return owner.getArgument(data.getArgNumber() + half);
}
// Maps a control token back to its paired data argument, which lives half the
// argument list earlier.
BlockArgument GetLoopRegionDataOf(BlockArgument ctl) {
  Block &owner = *ctl.getOwner();
  unsigned half = owner.getNumArguments() / 2;
  return owner.getArgument(ctl.getArgNumber() - half);
}
// Returns the control token associated with `data`: for an op result, the
// op's last result (TFG ops emit their control token last); for a block
// argument, the token reported by the enclosing ControlArgumentInterface op.
Value LookupControlDependency(Value data) {
  assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
  Value control_dep;
  if (auto result = mlir::dyn_cast<OpResult>(data)) {
    // The control token is always the defining op's final result.
    control_dep = *std::prev(result.getOwner()->result_end());
  } else {
    auto arg = mlir::cast<BlockArgument>(data);
    control_dep = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
                      .getControlTokenOf(arg);
  }
  assert(mlir::isa<ControlType>(control_dep.getType()) &&
         "expected a control type");
  return control_dep;
}
// Returns the data value associated with control token `ctl`, or std::nullopt
// when the defining op produces only the control token (e.g. NoOp). For block
// arguments, delegates to the enclosing ControlArgumentInterface op.
std::optional<Value> LookupDataValue(Value ctl) {
  assert(mlir::isa<ControlType>(ctl.getType()) && "expected a control type");
  Value data;
  if (auto result = mlir::dyn_cast<OpResult>(ctl)) {
    // A single result means the op has no data outputs, only control.
    if (result.getOwner()->getNumResults() == 1) return {};
    data = *result.getOwner()->result_begin();
  } else {
    auto arg = mlir::cast<BlockArgument>(ctl);
    data = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
               .getDataValueOf(arg);
  }
  assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
  return data;
}
}
} | #include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
// LookupControlDependency: op results map to the op's last result; block
// arguments map to the paired control block argument.
TEST(DialectUtilityTest, TestLookupControlDependency) {
  MLIRContext context;
  context.getOrLoadDialect<tfg::TFGraphDialect>();
  const char *const code = R"mlir(
    tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
      %Copy, %ctl = Copy(%arg) : (tensor<i32>) -> (tensor<i32>)
      return(%Copy) : tensor<i32>
    }
  )mlir";
  OwningOpRef<mlir::ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
  Value copy = ret_op.getOperand(0);
  Value ctl = LookupControlDependency(copy);
  ASSERT_TRUE(ctl);
  OpResult ctl_result = mlir::dyn_cast<OpResult>(ctl);
  ASSERT_TRUE(ctl_result);
  EXPECT_EQ(ctl_result.getResultNumber(), 1);
  EXPECT_EQ(copy, ctl_result.getOwner()->getResult(0));
  EXPECT_EQ(ctl_result.getOwner()->getName().getStringRef(), "tfg.Copy");
  Value arg = ctl_result.getOwner()->getOperand(0);
  Value arg_ctl = LookupControlDependency(arg);
  ASSERT_TRUE(arg_ctl);
  BlockArgument ctl_arg = mlir::dyn_cast<BlockArgument>(arg_ctl);
  ASSERT_TRUE(ctl_arg);
  EXPECT_EQ(ctl_arg.getArgNumber(), 1);
  EXPECT_EQ(arg, ctl_arg.getOwner()->getArgument(0));
}
// LookupDataValue: control tokens map back to the first data result of the
// defining op, and control block arguments map back to the paired data arg.
TEST(DialectUtilityTest, TestLookupDataValue) {
  MLIRContext context;
  context.getOrLoadDialect<tfg::TFGraphDialect>();
  const char *const code = R"mlir(
    tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
      %Produce, %ctl = Produce [%arg.ctl] : () -> (tensor<i32>)
      return(%arg) [%ctl] : tensor<i32>
    }
  )mlir";
  OwningOpRef<mlir::ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
  Value ctl = ret_op.getOperand(1);
  std::optional<Value> produce = LookupDataValue(ctl);
  ASSERT_TRUE(produce);
  OpResult produce_result = mlir::dyn_cast<OpResult>(*produce);
  ASSERT_TRUE(produce_result);
  ASSERT_EQ(produce_result.getResultNumber(), 0);
  ASSERT_EQ(produce_result.getOwner()->getName().getStringRef(), "tfg.Produce");
  ASSERT_EQ(produce_result.getOwner()->getResult(1), ctl);
  Value arg_ctl = produce_result.getOwner()->getOperand(0);
  std::optional<Value> arg = LookupDataValue(arg_ctl);
  ASSERT_TRUE(arg);
  BlockArgument arg_arg = mlir::dyn_cast<BlockArgument>(*arg);
  ASSERT_TRUE(arg_arg);
  ASSERT_EQ(arg_arg.getArgNumber(), 0);
  ASSERT_EQ(arg_arg.getOwner()->getArgument(1), arg_ctl);
}
// LookupDataValue returns std::nullopt for an op (NoOp) that only produces a
// control token.
TEST(DialectUtilityTest, TestLookupDataValueNoData) {
  MLIRContext context;
  context.getOrLoadDialect<tfg::TFGraphDialect>();
  const char *const code = R"mlir(
    tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
      %ctl = NoOp [%arg.ctl] : () -> ()
      return(%arg) [%ctl] : tensor<i32>
    }
  )mlir";
  OwningOpRef<mlir::ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
  Value ctl = ret_op.getOperand(1);
  std::optional<Value> no_data = LookupDataValue(ctl);
  ASSERT_FALSE(no_data);
}
}
}
} |
86 | #ifndef TENSORSTORE_INTERNAL_METRICS_PROMETHEUS_H_
#define TENSORSTORE_INTERNAL_METRICS_PROMETHEUS_H_
#include <map>
#include <string>
#include "absl/functional/function_ref.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_metrics {
// Configuration for pushing metrics to a Prometheus Pushgateway.
struct PushGatewayConfig {
  // Base address of the gateway, e.g. "http://host:9091".
  std::string host;
  // Job name; becomes the /metrics/job/<job> path segment (required).
  std::string job;
  // Optional instance label appended as /instance/<instance>.
  std::string instance;
  // Extra grouping labels appended as /<key>/<value> path segments.
  std::map<std::string, std::string> additional_labels;
};
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
const PushGatewayConfig& config);
void PrometheusExpositionFormat(
const CollectedMetric& metric,
absl::FunctionRef<void(std::string)> handle_line);
}
}
#endif
#include "tensorstore/internal/metrics/prometheus.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_metrics {
namespace {
static inline constexpr internal::AsciiSet kDigit{"0123456789"};
static inline constexpr internal::AsciiSet kMetricFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_:"};
static inline constexpr internal::AsciiSet kLabelFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_"};
static inline constexpr internal::AsciiSet kValueUnreserved{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.~()"};
// Returns true if `label` is a valid Prometheus label name: first
// character in [A-Za-z_], all characters in [A-Za-z0-9_].
bool IsLegalPrometheusLabel(std::string_view label) {
  if (label.empty()) return false;
  if (!kLabelFirst.Test(label.front())) return false;
  for (size_t i = 0; i < label.size(); ++i) {
    const char ch = label[i];
    if (!(kLabelFirst.Test(ch) || kDigit.Test(ch))) return false;
  }
  return true;
}
// Appends "/<label>/<value>" to `url` using Pushgateway grouping-key
// syntax.  An empty value is encoded as "<label>@base64/=", and a value
// containing characters outside the URL-safe set is base64-encoded.
// Returns InvalidArgumentError when `label` is not a legal Prometheus
// label name.
absl::Status AppendLabelValue(std::string* url, std::string_view label,
                              std::string_view value) {
  if (!IsLegalPrometheusLabel(label)) {
    return absl::InvalidArgumentError("");
  }
  if (value.empty()) {
    // Base64 encoding of the empty string; a bare trailing "/" would be
    // ambiguous in the URL path.
    absl::StrAppend(url, "/", label, "@base64/=");
    // BUG FIX: previously control fell through and appended
    // "/<label>/" a second time below, corrupting the URL.
    return absl::OkStatus();
  }
  for (char c : value) {
    if (!kValueUnreserved.Test(c)) {
      absl::StrAppend(url, "/", label, "@base64/",
                      absl::WebSafeBase64Escape(value));
      return absl::OkStatus();
    }
  }
  absl::StrAppend(url, "/", label, "/", value);
  return absl::OkStatus();
}
// Sanitizes `in` into a Prometheus-compatible name: leading characters
// not in `first` are dropped, trailing characters in neither `first` nor
// [0-9] are dropped, and any remaining illegal character becomes '_'.
std::string AsPrometheusString(std::string_view in, internal::AsciiSet first) {
  size_t begin = 0;
  while (begin < in.size() && !first.Test(in[begin])) ++begin;
  size_t end = in.size();
  while (end > begin && !first.Test(in[end - 1]) && !kDigit.Test(in[end - 1])) {
    --end;
  }
  std::string result(in.substr(begin, end - begin));
  for (char& ch : result) {
    if (!first.Test(ch) && !kDigit.Test(ch)) ch = '_';
  }
  return result;
}
// Formats a single exposition-format sample line:
//   "<metric_name><suffix>{<label_str>} <value>"
// with the braces omitted when there are no labels.  Used as a std::visit
// visitor over the metric value variant; string and monostate values
// yield an empty line, which callers skip.
//
// NOTE: holds references and is brace-initialized positionally at every
// call site, so the member order is part of the contract.
struct PrometheusValueLine {
  const std::string& metric_name;
  const char* suffix;
  const std::string& label_str;
  std::string operator()(int64_t x) {
    return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
                        label_str, label_str.empty() ? "" : "} ", x);
  }
  std::string operator()(double x) {
    return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
                        label_str, label_str.empty() ? "" : "} ", x);
  }
  // String-valued and unset metrics are not representable; emit nothing.
  std::string operator()(const std::string& x) { return {}; }
  std::string operator()(std::monostate) { return {}; }
};
}
// Builds a PUT request for pushing metrics to the Pushgateway configured
// in `config`: "<host>/metrics/job/<job>[/instance/<instance>][/k/v...]".
// Fails with InvalidArgumentError when the job is empty, the host lacks
// an http(s) scheme, or additional_labels shadow "job"/"instance".
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
    const PushGatewayConfig& config) {
  if (config.job.empty()) {
    return absl::InvalidArgumentError("PushGatewayConfig bad job");
  }
  // FIX: the scheme literals had lost their "//" and become unterminated
  // strings; restore the full prefixes so this compiles and validates.
  if (!absl::StartsWith(config.host, "http://") &&
      !absl::StartsWith(config.host, "https://")) {
    return absl::InvalidArgumentError("PushGatewayConfig bad host");
  }
  std::string url = config.host;
  if (!absl::EndsWith(url, "/")) {
    absl::StrAppend(&url, "/metrics");
  } else {
    absl::StrAppend(&url, "metrics");
  }
  TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, "job", config.job));
  if (!config.instance.empty()) {
    TENSORSTORE_RETURN_IF_ERROR(
        AppendLabelValue(&url, "instance", config.instance));
  }
  for (const auto& [k, v] : config.additional_labels) {
    if (absl::EqualsIgnoreCase("job", k) ||
        absl::EqualsIgnoreCase("instance", k)) {
      return absl::InvalidArgumentError(
          "PushGatewayConfig additional_labels cannot contain job or instance");
    }
    TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, k, v));
  }
  return internal_http::HttpRequestBuilder("PUT", std::move(url))
      .BuildRequest();
}
// Renders `metric` in Prometheus text exposition format, calling
// `handle_line` once per produced sample line.  Metric and label names
// are sanitized via AsPrometheusString; a metric whose sanitized name is
// empty produces no output.  Histograms are expanded into _mean, _count,
// _variance, _sum, and per-bucket _bucket lines (trailing zero buckets
// are elided; the final "+Inf" bucket carries the total count).
void PrometheusExpositionFormat(
    const CollectedMetric& metric,
    absl::FunctionRef<void(std::string)> handle_line) {
  std::string metric_name =
      AsPrometheusString(metric.metric_name, kMetricFirst);
  if (metric_name.empty()) return;
  // Sanitize the field names once; reused for every sample line below.
  std::vector<std::string> prometheus_fields;
  prometheus_fields.reserve(metric.field_names.size());
  for (size_t i = 0; i < metric.field_names.size(); ++i) {
    prometheus_fields.push_back(
        AsPrometheusString(metric.field_names[i], kLabelFirst));
  }
  auto build_label_str = [&](auto& v) -> std::string {
    assert(metric.field_names.size() == v.fields.size());
    if (v.fields.empty()) return {};
    std::string label_str;
    for (size_t i = 0; i < metric.field_names.size(); ++i) {
      absl::StrAppend(&label_str, i == 0 ? "" : ", ", prometheus_fields[i],
                      "=\"", absl::CEscape(v.fields[i]), "\"");
    }
    return label_str;
  };
  if (!metric.values.empty()) {
    std::string line;
    for (const auto& v : metric.values) {
      std::string label_str = build_label_str(v);
      line =
          std::visit(PrometheusValueLine{metric_name, " ", label_str}, v.value);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = std::visit(PrometheusValueLine{metric_name, "_max ", label_str},
                        v.max_value);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
    }
  }
  if (!metric.histograms.empty()) {
    std::string line;
    for (const auto& v : metric.histograms) {
      std::string label_str = build_label_str(v);
      // (Removed an unused local `struct Histogram` declaration that was
      // never referenced.)
      line = PrometheusValueLine{metric_name, "_mean ", label_str}(v.mean);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_count ", label_str}(v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_variance ",
                                 label_str}(v.sum_of_squared_deviation);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      line = PrometheusValueLine{metric_name, "_sum ",
                                 label_str}(v.mean * v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
      // Drop trailing empty buckets to keep the output compact.
      size_t end = v.buckets.size();
      while (end > 0 && v.buckets[end - 1] == 0) --end;
      for (size_t i = 0; i < end; i++) {
        // NOTE(review): "le" is emitted as the bucket index, not the
        // bucket upper bound; this matches the existing tests — confirm
        // consumers expect index-based "le" values.
        std::string bucket_labels = absl::StrCat(
            label_str, label_str.empty() ? "" : ", ", "le=\"", i, "\"");
        line = PrometheusValueLine{metric_name, "_bucket ",
                                   bucket_labels}(v.buckets[i]);
        if (!line.empty()) {
          handle_line(std::move(line));
        }
      }
      std::string bucket_labels =
          absl::StrCat(label_str, label_str.empty() ? "" : ", ", "le=\"+Inf\"");
      line =
          PrometheusValueLine{metric_name, "_bucket ", bucket_labels}(v.count);
      if (!line.empty()) {
        handle_line(std::move(line));
      }
    }
  }
}
}
} | #include "tensorstore/internal/metrics/prometheus.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
namespace {
using ::tensorstore::internal_metrics::BuildPrometheusPushRequest;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::PrometheusExpositionFormat;
using ::tensorstore::internal_metrics::PushGatewayConfig;
// Verifies the push URL layout: host + "/metrics/job/<job>/instance/<i>".
// FIX: the "http://" literals had been truncated to unterminated "http:"
// strings; reconstructed consistently with BuildPrometheusPushRequest.
TEST(PrometheusTest, BuildPrometheusPushRequest) {
  auto request = BuildPrometheusPushRequest(
      PushGatewayConfig{"http://localhost:8080", "my_job", "my_instance", {}});
  EXPECT_TRUE(request.has_value());
  EXPECT_EQ("http://localhost:8080/metrics/job/my_job/instance/my_instance",
            request->url);
}
// Verifies the exact exposition lines produced for a metric carrying one
// labeled value (with max) and one labeled histogram.
TEST(PrometheusTest, PrometheusExpositionFormat) {
  // Collects every line emitted by PrometheusExpositionFormat.
  auto format_lines = [](const CollectedMetric& metric) {
    std::vector<std::string> lines;
    PrometheusExpositionFormat(
        metric, [&](std::string line) { lines.push_back(std::move(line)); });
    return lines;
  };
  CollectedMetric metric;
  metric.metric_name = "metric_name";
  metric.field_names.push_back("field_name");
  metric.metadata.description = "description";
  metric.tag = "tag";
  // No values or histograms yet -> nothing is emitted.
  EXPECT_THAT(format_lines(metric), ::testing::IsEmpty());
  metric.histograms.push_back(CollectedMetric::Histogram{});
  auto& h = metric.histograms.back();
  h.fields.push_back("hh");
  h.count = 1;
  h.mean = 1;
  h.sum_of_squared_deviation = 1;
  h.buckets.push_back(0);
  h.buckets.push_back(1);
  metric.values.push_back(CollectedMetric::Value{});
  auto& v = metric.values.back();
  v.fields.push_back("vv");
  v.value = int64_t{1};
  v.max_value = int64_t{2};
  // Values come first, then the histogram expansion including the
  // index-keyed "le" buckets and the trailing "+Inf" bucket.
  EXPECT_THAT(format_lines(metric),
              ::testing::ElementsAre(
                  "metric_name {field_name=\"vv\"} 1",
                  "metric_name_max {field_name=\"vv\"} 2",
                  "metric_name_mean {field_name=\"hh\"} 1",
                  "metric_name_count {field_name=\"hh\"} 1",
                  "metric_name_variance {field_name=\"hh\"} 1",
                  "metric_name_sum {field_name=\"hh\"} 1",
                  "metric_name_bucket {field_name=\"hh\", le=\"0\"} 0",
                  "metric_name_bucket {field_name=\"hh\", le=\"1\"} 1",
                  "metric_name_bucket {field_name=\"hh\", le=\"+Inf\"} 1"));
}
} |
87 | #ifndef TENSORFLOW_LITE_DELEGATES_FLEX_BUFFER_MAP_H_
#define TENSORFLOW_LITE_DELEGATES_FLEX_BUFFER_MAP_H_
#include <map>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace flex {
// Maps a TF Lite tensor index to a tensorflow::Tensor so the flex
// delegate can hand buffers between the two runtimes.
class BufferMap {
 public:
  BufferMap();
  ~BufferMap();
  // Returns true if a tensor is registered under `tensor_index`.
  bool HasTensor(int tensor_index) const;
  // Returns a copy of the tensor at `tensor_index`; the index must be
  // present (map::at semantics).
  tensorflow::Tensor GetTensor(int tensor_index) const;
  // Returns a pointer to the stored tensor; valid until the entry is
  // overwritten or the map is destroyed.  Index must be present.
  const tensorflow::Tensor* GetTensorPtr(int tensor_index) const;
  // Registers/overwrites the entry with a TensorFlow tensor.
  void SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor);
  // Registers/overwrites the entry by converting a TF Lite tensor;
  // `allow_reusing` permits sharing the TF Lite buffer (see
  // SetTfTensorFromTfLite).
  void SetFromTfLite(int tensor_index, const TfLiteTensor* tensor,
                     bool allow_reusing = true);
 private:
  std::map<int, tensorflow::Tensor> id_to_tensor_;
};
}
}
#endif
#include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <utility>
#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace flex {
// Defaulted out-of-line (modernize-use-equals-default); behavior unchanged.
BufferMap::BufferMap() = default;
// Defaulted out-of-line (modernize-use-equals-default); behavior unchanged.
BufferMap::~BufferMap() = default;
// Returns true if a tensor has been registered under `tensor_index`.
bool BufferMap::HasTensor(int tensor_index) const {
  return id_to_tensor_.find(tensor_index) != id_to_tensor_.end();
}
// Returns a copy of the tensor stored at `tensor_index`; map::at throws
// std::out_of_range if the index is absent.
tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
  const auto& stored = id_to_tensor_.at(tensor_index);
  return stored;
}
// Returns the address of the stored tensor (no copy); map::at throws
// std::out_of_range if the index is absent.
const tensorflow::Tensor* BufferMap::GetTensorPtr(int tensor_index) const {
  return &id_to_tensor_.at(tensor_index);
}
void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor,
bool allow_reusing) {
TFLITE_CHECK(
SetTfTensorFromTfLite(tensor, &id_to_tensor_[tensor_index], allow_reusing)
.ok());
}
// Stores (or replaces) the tensor for `tensor_index`, taking ownership.
void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
  id_to_tensor_.insert_or_assign(tensor_index, std::move(tensor));
}
}
} | #include "tensorflow/lite/delegates/flex/buffer_map.h"
#include <sys/types.h>
#include <functional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/flex/buffer_map_util.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
using ::testing::ElementsAre;
using UniqueTfLiteTensor =
std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
// Builds a heap-allocated dynamic TfLiteTensor of element type T with the
// given shape and contents.  The custom deleter releases the data buffer
// and dims array before deleting the tensor struct itself.
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
                                  const std::vector<T>& data) {
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = typeToTfLiteType<T>();
  tensor->dims = ConvertVectorToTfLiteIntArray(shape);
  TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
  memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
  return tensor;
}
// String specialization: strings are packed with DynamicBuffer, which
// writes the TF Lite string-tensor layout into the tensor.
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
                                          const std::vector<string>& data) {
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = typeToTfLiteType<string>();
  tensor->dims = ConvertVectorToTfLiteIntArray(shape);
  TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());
  DynamicBuffer b;
  for (const string& s : data) {
    b.AddString(s.data(), s.size());
  }
  // NOTE(review): dims is converted twice (assigned above and passed to
  // WriteToTensor); presumably WriteToTensor frees the previous dims —
  // confirm there is no leak of the first array.
  b.WriteToTensor(tensor.get(), ConvertVectorToTfLiteIntArray(shape));
  return tensor;
}
// Builds a tensorflow::Tensor of `dtype` with the given shape, copying
// `data` into its buffer.
template <typename T>
tensorflow::Tensor MakeTensor(const std::vector<int64_t>& shape,
                              const std::vector<T>& data,
                              tensorflow::DataType dtype) {
  tensorflow::Tensor result(dtype, tensorflow::TensorShape(shape));
  memcpy(result.data(), data.data(), data.size() * sizeof(T));
  return result;
}
// Returns the dimension sizes of `t` as a plain vector.
std::vector<int64_t> GetTensorShape(const tensorflow::Tensor& t) {
  std::vector<int64_t> dims;
  dims.reserve(t.dims());
  for (int axis = 0; axis < t.dims(); ++axis) {
    dims.push_back(t.dim_size(axis));
  }
  return dims;
}
// Copies the flattened contents of `t` into a vector of T.
template <typename T>
std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
  const T* begin = t.flat<T>().data();
  const T* end = begin + t.NumElements();
  return std::vector<T>(begin, end);
}
// A fresh BufferMap contains no tensors.
TEST(BufferMapTest, EmptyBuffer) {
  BufferMap buffer_map;
  EXPECT_FALSE(buffer_map.HasTensor(0));
}
// A TF Lite float tensor round-trips into a DT_FLOAT tensorflow::Tensor
// with identical contents and shape.
TEST(BufferMapTest, SetFromTfLite) {
  BufferMap buffer_map;
  UniqueTfLiteTensor t =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  buffer_map.SetFromTfLite(0, t.get());
  ASSERT_TRUE(buffer_map.HasTensor(0));
  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
// A TF Lite string tensor converts to a DT_STRING tensorflow::Tensor.
TEST(BufferMapTest, SetFromTfLiteString) {
  BufferMap buffer_map;
  UniqueTfLiteTensor t =
      MakeLiteTensor<string>({1, 2, 1, 3}, {"", "", "", "str1", "", ""});
  buffer_map.SetFromTfLite(0, t.get());
  ASSERT_TRUE(buffer_map.HasTensor(0));
  EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
              ElementsAre("", "", "", "str1", "", ""));
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_STRING);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
// Registering a second TF Lite tensor under the same index replaces the
// first, including a change of dtype and shape.
TEST(BufferMapTest, SetFromTfLiteTwice) {
  UniqueTfLiteTensor t1 =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t1.get());
  buffer_map.SetFromTfLite(0, t2.get());
  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
// Replacing a numeric entry with a string tensor also works.
TEST(BufferMapTest, SetFromTfLiteStringTwice) {
  UniqueTfLiteTensor t1 =
      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<string>({1, 2, 4}, {"", "", "", "s3", "", "", "s1", "s2"});
  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t1.get());
  buffer_map.SetFromTfLite(0, t2.get());
  EXPECT_THAT(GetTensorData<tensorflow::tstring>(buffer_map.GetTensor(0)),
              ElementsAre("", "", "", "s3", "", "", "s1", "s2"));
}
// A kTfLiteResource tensor converts to a DT_RESOURCE tensor whose handle
// name encodes the resource id stored in the TF Lite buffer.
TEST(BufferMapTest, SetFromTfLiteBuiltinResource) {
  BufferMap buffer_map;
  auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
    TfLiteTensorDataFree(t);
    TfLiteIntArrayFree(t->dims);
    delete t;
  });
  // Construct a resource tensor holding resource id 1.
  tensor->allocation_type = kTfLiteDynamic;
  tensor->type = kTfLiteResource;
  tensor->dims = ConvertVectorToTfLiteIntArray({1});
  TfLiteTensorRealloc(sizeof(int32_t), tensor.get());
  tensor->delegate = nullptr;
  tensor->data.i32[0] = 1;
  buffer_map.SetFromTfLite(0, tensor.get());
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_RESOURCE);
  ASSERT_EQ(out_tensor.NumElements(), 1);
  tensorflow::ResourceHandle handle =
      out_tensor.flat<tensorflow::ResourceHandle>()(0);
  EXPECT_EQ(handle.name(), "tflite_resource_variable:1");
}
// A tensorflow::Tensor stored directly is returned unchanged.
TEST(BufferMapTest, SetFromTensorFlow) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);
  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));
  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
  ASSERT_EQ(out_tensor.NumElements(), 6);
  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
}
// Storing twice under the same index keeps only the second tensor.
TEST(BufferMapTest, SetFromTensorFlowTwice) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2},
                                          tensorflow::DT_INT32);
  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);
  buffer_map.SetFromTensorFlow(0, t2);
  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
// A later SetFromTfLite replaces an earlier SetFromTensorFlow entry.
TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
  BufferMap buffer_map;
  buffer_map.SetFromTensorFlow(0, t1);
  buffer_map.SetFromTfLite(0, t2.get());
  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
}
// A later SetFromTensorFlow replaces an earlier SetFromTfLite entry.
TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
  tensorflow::Tensor t1 = MakeTensor<float>(
      {1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0}, tensorflow::DT_FLOAT);
  UniqueTfLiteTensor t2 =
      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
  BufferMap buffer_map;
  buffer_map.SetFromTfLite(0, t2.get());
  buffer_map.SetFromTensorFlow(0, t1);
  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
              ElementsAre(0, 0, 0, 0.123f, 0, 0));
}
// With reuse enabled (default), TfLiteTensorBuffer adopts the TF Lite
// buffer in place rather than copying it.
TEST(BufferMapTest, TensorflowBufferReuse) {
  TfLiteTensor tensor;
  tensor.allocation_type = kTfLiteDynamic;
  tensor.data.raw = nullptr;
  TfLiteTensorRealloc(10, &tensor);
  CHECK(tensor.data.raw);
  EXPECT_EQ(tensor.bytes, 10);
  TfLiteTensorBuffer* tensor_buffer_reused = new TfLiteTensorBuffer(&tensor);
  EXPECT_TRUE(tensor_buffer_reused->BufferReusedFromTfLiteTensor());
  // Same pointer -> no copy was made.
  EXPECT_EQ(tensor_buffer_reused->data(), tensor.data.raw);
  tensor_buffer_reused->Unref();
  TfLiteTensorDataFree(&tensor);
}
// With reuse disabled, TfLiteTensorBuffer must allocate its own copy.
TEST(BufferMapTest, ExplicitlyDisableBufferReuse) {
  TfLiteTensor tensor;
  tensor.allocation_type = kTfLiteDynamic;
  tensor.data.raw = nullptr;
  TfLiteTensorRealloc(10, &tensor);
  CHECK(tensor.data.raw);
  EXPECT_EQ(tensor.bytes, 10);
  TfLiteTensorBuffer* tensor_buffer =
      new TfLiteTensorBuffer(&tensor, false);
  EXPECT_FALSE(tensor_buffer->BufferReusedFromTfLiteTensor());
  // Distinct pointer -> the data was copied.
  EXPECT_NE(tensor_buffer->data(), tensor.data.raw);
  tensor_buffer->Unref();
  TfLiteTensorDataFree(&tensor);
}
}
}
} |
88 | #ifndef TENSORSTORE_SERIALIZATION_SERIALIZATION_H_
#define TENSORSTORE_SERIALIZATION_SERIALIZATION_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/apply_members/apply_members.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
void FailNonNull(DecodeSource& source);
void FailEof(DecodeSource& source);
}
// Abstract sink to which objects are serialized.  Wraps a riegeli::Writer
// and adds support for "indirect" serialization of shared objects via
// DoIndirect, which receives a type-erased encode callback per object.
class EncodeSink {
 public:
  // Underlying byte stream.
  riegeli::Writer& writer() { return writer_; }
  // Marks the sink as failed with `status`.
  void Fail(absl::Status status);
  // Error status of the underlying writer.
  absl::Status status() const { return writer_.status(); }
  // Flushes and closes the writer; returns false on error.
  virtual bool Close() { return writer_.Close(); }
  // Serializes a shared_ptr-owned object indirectly.  The pointee is
  // encoded with `serializer` via the type-erased wrapper passed to
  // DoIndirect.
  template <typename T,
            typename DirectSerializer = Serializer<std::shared_ptr<T>>>
  [[nodiscard]] bool Indirect(std::shared_ptr<T> object,
                              DirectSerializer serializer = {}) {
    return DoIndirect(
        typeid(std::shared_ptr<T>),
        [serializer = std::move(serializer)](
            EncodeSink& sink, const std::shared_ptr<void>& value) {
          return serializer.Encode(sink, std::static_pointer_cast<T>(value));
        },
        internal::StaticConstPointerCast<void>(std::move(object)));
  }
  // As above, for IntrusivePtr-owned objects; ownership is bridged to a
  // shared_ptr via internal::IntrusiveToShared.
  template <
      typename T, typename Traits,
      typename DirectSerializer = Serializer<internal::IntrusivePtr<T, Traits>>>
  [[nodiscard]] bool Indirect(internal::IntrusivePtr<T, Traits> object,
                              DirectSerializer serializer = {}) {
    return DoIndirect(
        typeid(internal::IntrusivePtr<T, Traits>),
        [serializer = std::move(serializer)](
            EncodeSink& sink, const std::shared_ptr<void>& value) {
          return serializer.Encode(sink, internal::IntrusivePtr<T, Traits>(
                                             static_cast<T*>(value.get())));
        },
        internal::StaticConstPointerCast<void>(
            internal::IntrusiveToShared(std::move(object))));
  }
  // Type-erased callback that encodes one indirect object.
  using ErasedEncodeWrapperFunction =
      poly::Poly<0, true,
                 bool(EncodeSink& sink,
                      const std::shared_ptr<void>& erased_value) const>;
  // Implementation hook for indirect encoding, keyed by `type`.
  // NOTE(review): concrete sinks presumably deduplicate repeated objects;
  // confirm against the implementations.
  [[nodiscard]] virtual bool DoIndirect(const std::type_info& type,
                                        ErasedEncodeWrapperFunction encode,
                                        std::shared_ptr<void> object) = 0;

 protected:
  explicit EncodeSink(riegeli::Writer& writer) : writer_(writer) {}
  ~EncodeSink() = default;

 private:
  riegeli::Writer& writer_;
};
absl::Status DecodeError();
absl::Status DecodeError(std::string_view message);
// Abstract source from which objects are deserialized; the counterpart of
// EncodeSink.  Wraps a riegeli::Reader and supports indirect decoding of
// shared objects via DoIndirect.
class DecodeSource {
 public:
  // Underlying byte stream.
  riegeli::Reader& reader() { return reader_; }
  // Marks the source as failed with `status`.
  void Fail(absl::Status status);
  // Error status of the underlying reader.
  absl::Status status() const { return reader_.status(); }
  // Verifies all input was consumed and closes the reader.
  virtual absl::Status Done() {
    if (reader_.VerifyEndAndClose()) return absl::OkStatus();
    return status();
  }
  // Decodes a shared_ptr-owned object indirectly; the pointee is decoded
  // by `serializer` inside the type-erased wrapper passed to DoIndirect.
  template <typename T,
            typename DirectSerializer = Serializer<std::shared_ptr<T>>>
  [[nodiscard]] bool Indirect(std::shared_ptr<T>& object,
                              DirectSerializer serializer = {}) {
    std::shared_ptr<void> void_ptr;
    if (!DoIndirect(
            typeid(std::shared_ptr<T>),
            [serializer = std::move(serializer)](DecodeSource& source,
                                                 std::shared_ptr<void>& value) {
              std::shared_ptr<T> typed_value;
              if (!serializer.Decode(source, typed_value)) return false;
              value = std::move(typed_value);
              return true;
            },
            void_ptr)) {
      return false;
    }
    object = internal::static_pointer_cast<T>(std::move(void_ptr));
    return true;
  }
  // As above, for IntrusivePtr-owned objects.
  template <
      typename T, typename Traits,
      typename DirectSerializer = Serializer<internal::IntrusivePtr<T, Traits>>>
  [[nodiscard]] bool Indirect(internal::IntrusivePtr<T, Traits>& object,
                              DirectSerializer serializer = {}) {
    std::shared_ptr<void> void_ptr;
    if (!DoIndirect(
            typeid(internal::IntrusivePtr<T, Traits>),
            [&serializer](DecodeSource& source, std::shared_ptr<void>& value) {
              internal::IntrusivePtr<T, Traits> typed_value;
              if (!serializer.Decode(source, typed_value)) return false;
              value = internal::StaticConstPointerCast<void>(
                  internal::IntrusiveToShared(std::move(typed_value)));
              return true;
            },
            void_ptr)) {
      return false;
    }
    object.reset(static_cast<T*>(void_ptr.get()));
    return true;
  }
  // Type-erased callback that decodes one indirect object into `value`.
  using ErasedDecodeWrapperFunction = absl::FunctionRef<bool(
      DecodeSource& source, std::shared_ptr<void>& value)>;
  // Implementation hook for indirect decoding, keyed by `type`.
  [[nodiscard]] virtual bool DoIndirect(const std::type_info& type,
                                        ErasedDecodeWrapperFunction decode,
                                        std::shared_ptr<void>& value) = 0;

 protected:
  DecodeSource(riegeli::Reader& reader) : reader_(reader) {}
  ~DecodeSource() = default;

 private:
  riegeli::Reader& reader_;
};
// Wrapper marking a value as intentionally non-serializable: attempting
// to serialize it fails at runtime (see Serializer<NonSerializable<T>>),
// while ApplyMembers-based traversal still sees the underlying T.
template <typename T>
struct NonSerializable : public T {
  static constexpr auto ApplyMembers = [](auto&& x, auto f) {
    return f(internal::BaseCast<T>(x));
  };
};
// Deduction guide: NonSerializable(x) wraps the type of x.
template <typename T>
NonSerializable(const T& x) -> NonSerializable<T>;
// Trait: true only for NonSerializable<T> instantiations.
template <typename T>
constexpr inline bool IsNonSerializable = false;
template <typename T>
constexpr inline bool IsNonSerializable<NonSerializable<T>> = true;
namespace internal_serialization {
// Returns the status used when (de)serialization of a NonSerializable
// value is attempted.
absl::Status NonSerializableError();
}
// Serializer specialization that always fails: NonSerializable values
// cannot be encoded or decoded, only detected via non_serializable().
template <typename T>
struct Serializer<NonSerializable<T>> {
  [[nodiscard]] static bool Encode(EncodeSink& sink,
                                   const NonSerializable<T>& value) {
    sink.Fail(internal_serialization::NonSerializableError());
    return false;
  }
  [[nodiscard]] static bool Decode(DecodeSource& source,
                                   NonSerializable<T>& value) {
    source.Fail(internal_serialization::NonSerializableError());
    return false;
  }
  // Marker consulted by IsNonSerializer.
  constexpr static bool non_serializable() { return true; }
};
// Trait: true when `Serializer` declares non_serializable() and it
// evaluates to true (i.e. the serializer always fails).
template <typename Serializer, typename SFINAE = void>
constexpr inline bool IsNonSerializer = false;
template <typename Serializer>
constexpr inline bool IsNonSerializer<
    Serializer, std::void_t<decltype(&Serializer::non_serializable)>> =
    Serializer::non_serializable();
// Trait: true when the default Serializer<T> is non-serializable.
template <typename T>
constexpr inline bool IsNonSerializableLike = IsNonSerializer<Serializer<T>>;
// Serializer for trivially-copyable types: writes/reads the raw object
// representation (sizeof(T) bytes).
template <typename T>
struct MemcpySerializer {
  [[nodiscard]] static bool Encode(EncodeSink& sink, const T& value) {
    return sink.writer().Write(
        std::string_view(reinterpret_cast<const char*>(&value), sizeof(T)));
  }
  [[nodiscard]] static bool Decode(DecodeSource& source, T& value) {
    return source.reader().Read(sizeof(T), reinterpret_cast<char*>(&value));
  }
};
// Types opted in via SerializeUsingMemcpy get the memcpy serializer.
template <typename T>
struct Serializer<T, std::enable_if_t<SerializeUsingMemcpy<T>>>
    : public MemcpySerializer<T> {};
// bool is encoded as a single byte; any nonzero byte decodes to true.
template <>
struct Serializer<bool> {
  [[nodiscard]] static bool Encode(EncodeSink& sink, bool value) {
    return sink.writer().WriteByte(value ? 1 : 0);
  }
  [[nodiscard]] static bool Decode(DecodeSource& source, bool& value) {
    uint8_t byte;
    if (!source.reader().ReadByte(byte)) return false;
    value = (byte != 0);
    return true;
  }
};
// Convenience entry point: encodes `value` with the given (or default)
// serializer.
template <typename T, typename ElementSerializer = Serializer<T>>
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value,
                          const ElementSerializer& serialize = {}) {
  return serialize.Encode(sink, value);
}
// Convenience entry point: decodes into `value` with the given (or
// default) serializer.
template <typename T,
          typename ElementSerializer = Serializer<internal::remove_cvref_t<T>>>
[[nodiscard]] bool Decode(DecodeSource& source, T&& value,
                          const ElementSerializer& serialize = {}) {
  return serialize.Decode(source, value);
}
// Serializer for string-like types: length-delimited encoding via the
// shared WriteDelimited/ReadDelimited helpers.
template <typename String>
struct StringSerializer {
  [[nodiscard]] static bool Encode(EncodeSink& sink, const String& value) {
    return serialization::WriteDelimited(sink.writer(), value);
  }
  [[nodiscard]] static bool Decode(DecodeSource& source, String& value) {
    return serialization::ReadDelimited(source.reader(), value);
  }
};
template <>
struct Serializer<std::string> : public StringSerializer<std::string> {};
template <>
struct Serializer<absl::Cord> : public StringSerializer<absl::Cord> {};
// NOTE(review): decoding into a std::string_view relies on
// ReadDelimited's string_view overload; confirm the view's lifetime
// contract with callers.
template <>
struct Serializer<std::string_view>
    : public StringSerializer<std::string_view> {};
// Encodes each value in order, stopping at the first failure.
template <typename... T>
[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool EncodeTuple(
    EncodeSink& sink, const T&... value) {
  return (serialization::Encode(sink, value) && ...);
}
// Decodes each value in order, stopping at the first failure.
template <typename... T>
[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool DecodeTuple(
    DecodeSource& source, T&&... value) {
  return (serialization::Decode(source, value) && ...);
}
// ApplyMembers visitor returning (as a compile-time constant) whether any
// member type is non-serializable.
struct IsAnyNonSerializable {
  template <typename... T>
  constexpr auto operator()(const T&... arg) const {
    return std::integral_constant<bool, (IsNonSerializableLike<T> || ...)>{};
  }
};
// Serializer for aggregate-like types exposing ApplyMembers: each member
// is encoded/decoded in declaration order.
template <typename T>
struct ApplyMembersSerializer {
  [[nodiscard]] static bool Encode(EncodeSink& sink, const T& value) {
    return ApplyMembers<T>::Apply(value, [&sink](const auto&... member) {
      return (serialization::Encode(sink, member) && ...);
    });
  }
  [[nodiscard]] static bool Decode(DecodeSource& source, T& value) {
    return ApplyMembers<T>::Apply(value, [&source](auto&&... member) {
      return (serialization::Decode(source, member) && ...);
    });
  }
  // Non-serializable iff any member is non-serializable.
  constexpr static bool non_serializable() {
    return decltype(ApplyMembers<T>::Apply(std::declval<const T&>(),
                                           IsAnyNonSerializable{}))::value;
  }
};
// Default serializer for ApplyMembers types (unless memcpy-eligible or
// explicitly marked NonSerializable).
template <typename T>
struct Serializer<
    T, std::enable_if_t<(SupportsApplyMembers<T> && !IsNonSerializable<T> &&
                         !SerializeUsingMemcpy<T>)>>
    : public ApplyMembersSerializer<T> {};
// Serializer for containers: a size prefix followed by each element.
// Decoding clears the container and inserts elements one by one, so it
// works for both sequence and associative containers.
template <typename T, typename ValueType = typename T::value_type,
          typename ElementSerializer = Serializer<ValueType>>
struct ContainerSerializer {
  [[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
    if (!serialization::WriteSize(sink.writer(), value.size())) return false;
    for (const auto& element : value) {
      if (!serialization::Encode(sink, element, element_serializer)) {
        return false;
      }
    }
    return true;
  }
  [[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
    value.clear();
    size_t size;
    if (!serialization::ReadSize(source.reader(), size)) return false;
    for (size_t i = 0; i < size; ++i) {
      ValueType element;
      if (!serialization::Decode(source, element, element_serializer)) {
        return false;
      }
      // insert(end, ...) appends for sequences and inserts for
      // associative containers.
      value.insert(value.end(), std::move(element));
    }
    return true;
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer = {};
  constexpr static bool non_serializable() {
    return IsNonSerializer<ElementSerializer>;
  }
};
// Serializer for optional-like types: a presence bool followed by the
// contained value when present.  Decoding emplaces before decoding.
template <typename T,
          typename ElementSerializer = Serializer<typename T::value_type>>
struct OptionalSerializer {
  [[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
    return serialization::Encode(sink, static_cast<bool>(value)) &&
           (!value || element_serializer.Encode(sink, *value));
  }
  [[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
    bool has_value;
    return serialization::Decode(source, has_value) &&
           (!has_value || element_serializer.Decode(source, value.emplace()));
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer;
  constexpr static bool non_serializable() {
    return IsNonSerializer<ElementSerializer>;
  }
};
// Trait: true when Serializer<T>::Encode is well-formed and T is
// default-constructible (required for decoding into a fresh value).
template <typename T, typename SFINAE = void>
inline constexpr bool IsSerializable = false;
template <typename T>
inline constexpr bool IsSerializable<
    T, std::void_t<decltype(Serializer<T>::Encode(std::declval<EncodeSink&>(),
                                                  std::declval<const T&>()))>> =
    std::is_default_constructible_v<T>;
// Predicate functor: true when `x` contextually converts to true (e.g. a
// non-null pointer-like value).
struct IsNonNull {
  template <typename T>
  constexpr bool operator()(const T& x) const {
    return x ? true : false;
  }
};
// Predicate functor: forwards to the object's own `valid()` member.
struct IsValid {
  template <typename T>
  constexpr bool operator()(const T& x) const {
    return x.valid() ? true : false;
  }
};
// Adapts a serializer for a possibly-null/invalid value: a validity bool
// is written first, and the wrapped serializer runs only when valid.
// Decoding of a "valid" record must produce a value the predicate
// accepts (asserted).
template <typename T, typename NonNullSerializer,
          typename IsNullPredicate = IsNonNull>
struct MaybeNullSerializer {
  [[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
    const bool valid = IsNullPredicate{}(value);
    if (!serialization::Encode(sink, valid)) return false;
    if (!valid) return true;
    return non_null_serializer.Encode(sink, value);
  }
  [[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
    bool valid;
    if (!serialization::Decode(source, valid)) return false;
    if (!valid) return true;
    if (!non_null_serializer.Decode(source, value)) return false;
    assert(IsNullPredicate{}(value));
    return true;
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS NonNullSerializer non_null_serializer = {};
  constexpr static bool non_serializable() {
    return IsNonSerializer<NonNullSerializer>;
  }
};
// Wraps a serializer with the invariant that the value satisfies
// `Predicate`: asserted on encode, checked on decode (failing the
// source with FailNonNull if violated).
template <typename T, typename BaseSerializer = Serializer<T>,
          typename Predicate = IsNonNull>
struct NonNullSerializer {
  [[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
    assert(Predicate{}(value));
    return base_serializer.Encode(sink, value);
  }
  [[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
    if (!base_serializer.Decode(source, value)) return false;
    if (!Predicate{}(value)) {
      internal_serialization::FailNonNull(source);
      return false;
    }
    return true;
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS BaseSerializer base_serializer = {};
  constexpr static bool non_serializable() {
    return IsNonSerializer<BaseSerializer>;
  }
};
// Serializer for smart pointers known to be non-null: serializes the
// pointee directly.  Decoding default-constructs a fresh pointee and
// decodes into it.
template <typename Pointer,
          typename ElementSerializer =
              Serializer<std::remove_cv_t<typename Pointer::element_type>>>
struct NonNullPointerSerializer {
  using element_type = std::remove_cv_t<typename Pointer::element_type>;
  [[nodiscard]] bool Encode(EncodeSink& sink, const Pointer& value) const {
    assert(value);
    return element_serializer.Encode(sink, *value);
  }
  [[nodiscard]] bool Decode(DecodeSource& source, Pointer& value) const {
    value.reset(new element_type);
    return element_serializer.Decode(source, *value);
  }
  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer = {};
  constexpr static bool non_serializable() {
    return IsNonSerializer<ElementSerializer>;
  }
};
template <typename Pointer,
typename NonNullSerializer = NonNullPointerSerializer<Pointer>>
using PointerSerializer = MaybeNullSerializer<Pointer, NonNullSerializer>;
template <typename Pointer,
typename NonNullSerializer = NonNullPointerSerializer<Pointer>>
struct NonNullIndirectPointerSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const Pointer& value) const {
assert(value);
return sink.Indirect(value, non_null_serializer);
}
[[nodiscard]] bool Decode(DecodeSource& source, Pointer& value) const {
return | #include "tensorstore/serialization/serialization.h"
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/std_map.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/serialization/std_set.h"
#include "tensorstore/serialization/std_tuple.h"
#include "tensorstore/serialization/std_variant.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::serialization::IsNonSerializableLike;
using ::tensorstore::serialization::NonSerializable;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Bool) {
TestSerializationRoundTrip(true);
TestSerializationRoundTrip(false);
}
TEST(SerializationTest, Float) {
TestSerializationRoundTrip(3.14f);
TestSerializationRoundTrip(0.0f);
}
TEST(SerializationTest, String) {
TestSerializationRoundTrip(std::string("abcdefg"));
TestSerializationRoundTrip(std::string(""));
}
TEST(CordTest, SerializationRoundTrip) {
TestSerializationRoundTrip(absl::Cord(""));
TestSerializationRoundTrip(absl::Cord("abc"));
}
TEST(SerializationTest, Int32) {
TestSerializationRoundTrip(static_cast<int32_t>(0));
TestSerializationRoundTrip(static_cast<int32_t>(3));
TestSerializationRoundTrip(static_cast<int32_t>(2147483647));
TestSerializationRoundTrip(static_cast<int32_t>(-2147483648));
}
TEST(SerializationTest, VectorInt) {
TestSerializationRoundTrip(std::vector<int>{});
TestSerializationRoundTrip(std::vector<int>{1, 2, 3});
}
TEST(SerializationTest, VectorString) {
TestSerializationRoundTrip(std::vector<std::string>{});
TestSerializationRoundTrip(std::vector<std::string>{"a", "b", "def"});
}
TEST(SerializationTest, VectorVectorString) {
TestSerializationRoundTrip(
std::vector<std::vector<std::string>>{{"a", "b", "def"}, {"e", "f"}});
}
TEST(SerializationTest, Map) {
TestSerializationRoundTrip(std::map<int, std::string>{{1, "a"}, {2, "b"}});
}
TEST(SerializationTest, Set) {
TestSerializationRoundTrip(std::set<int>{1, 2, 3});
}
TEST(SerializationTest, Tuple) {
TestSerializationRoundTrip(
std::tuple(std::string("abc"), 3, std::string("def")));
}
// Null smart pointers must round-trip to null.
TEST(SerializationTest, UniquePtrNull) {
  std::unique_ptr<int> ptr;
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
  EXPECT_FALSE(ptr2);
}
// A non-null unique_ptr round-trips to a new pointer with an equal pointee.
TEST(SerializationTest, UniquePtrNonNull) {
  auto ptr = std::make_unique<int>(5);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
  EXPECT_THAT(ptr2, ::testing::Pointee(5));
}
TEST(SerializationTest, SharedPtrNull) {
  std::shared_ptr<int> ptr;
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
  EXPECT_FALSE(ptr2);
}
TEST(SerializationTest, SharedPtrNonNull) {
  auto ptr = std::make_shared<int>(5);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
  EXPECT_THAT(ptr2, ::testing::Pointee(5));
}
// Serializing the same shared_ptr twice must preserve sharing: after the
// round trip both tuple elements point at one object.
TEST(SerializationTest, SharedPtrDuplicate) {
  auto ptr = std::make_shared<int>(5);
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto tuple2, SerializationRoundTrip(std::make_tuple(ptr, ptr)));
  EXPECT_THAT(std::get<0>(tuple2), ::testing::Pointee(5));
  EXPECT_EQ(std::get<0>(tuple2), std::get<1>(tuple2));
}
// Simple two-field aggregate used to exercise ApplyMembers-based
// serialization.
struct Foo {
  std::string a;
  std::string b;
  // Exposes the members to the serialization framework.
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.a, x.b);
  };
  // Field-wise equality, used to verify round trips.
  bool operator==(const Foo& other) const {
    return other.a == a && other.b == b;
  }
};
TEST(SerializationTest, ApplyMembers) {
TestSerializationRoundTrip(Foo{"xyz", "abcd"});
TestSerializationRoundTrip(Foo{"", "abcd"});
}
// std::optional round-trips both the empty and the engaged state.
// (Suite name fixed: was misspelled "SerialiationTest", splitting these
// cases away from the rest of the SerializationTest suite.)
TEST(SerializationTest, Optional) {
  TestSerializationRoundTrip(std::optional<int>());
  TestSerializationRoundTrip(std::optional<int>(42));
}
// std::variant round-trips the active alternative, including variants with
// duplicate alternative types (where the index disambiguates).
// (Suite name fixed: was misspelled "SerialiationTest".)
TEST(SerializationTest, Variant) {
  TestSerializationRoundTrip(std::variant<int, std::string>(42));
  TestSerializationRoundTrip(std::variant<int, std::string>("abc"));
  TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<1>, 1));
  TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<0>, 0));
}
static_assert(!IsNonSerializableLike<Foo>);
static_assert(!IsNonSerializableLike<std::pair<Foo, Foo>>);
static_assert(IsNonSerializableLike<NonSerializable<Foo>>);
static_assert(IsNonSerializableLike<std::pair<Foo, NonSerializable<Foo>>>);
} |
89 | #ifndef THIRD_PARTY_CEL_CPP_RUNTIME_CONSTANT_FOLDING_H_
#define THIRD_PARTY_CEL_CPP_RUNTIME_CONSTANT_FOLDING_H_
#include "absl/status/status.h"
#include "common/memory.h"
#include "runtime/runtime_builder.h"
namespace cel::extensions {
// Enables the constant-folding program optimization on the runtime under
// construction in `builder`.
//
// `memory_manager` is forwarded to the folding optimizer. Returns a
// kUnimplemented error if `builder` does not wrap the default cel::Runtime
// implementation.
absl::Status EnableConstantFolding(RuntimeBuilder& builder,
                                   MemoryManagerRef memory_manager);
}
#endif
#include "runtime/constant_folding.h"
#include "absl/base/macros.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "eval/compiler/constant_folding.h"
#include "internal/casts.h"
#include "internal/status_macros.h"
#include "runtime/internal/runtime_friend_access.h"
#include "runtime/internal/runtime_impl.h"
#include "runtime/runtime.h"
#include "runtime/runtime_builder.h"
namespace cel::extensions {
namespace {
using ::cel::internal::down_cast;
using ::cel::runtime_internal::RuntimeFriendAccess;
using ::cel::runtime_internal::RuntimeImpl;
// Extracts the default RuntimeImpl wrapped by `builder`, or reports
// kUnimplemented when the builder holds a different Runtime implementation.
absl::StatusOr<RuntimeImpl*> RuntimeImplFromBuilder(RuntimeBuilder& builder) {
  Runtime& runtime = RuntimeFriendAccess::GetMutableRuntime(builder);
  // Only the default implementation exposes the expr_builder hook that the
  // constant-folding optimizer needs.
  if (RuntimeFriendAccess::RuntimeTypeId(runtime) ==
      NativeTypeId::For<RuntimeImpl>()) {
    return &down_cast<RuntimeImpl&>(runtime);
  }
  return absl::UnimplementedError(
      "constant folding only supported on the default cel::Runtime "
      "implementation.");
}
}
// Installs the constant-folding program optimizer on the runtime owned by
// `builder`. Fails with kUnimplemented when the builder does not wrap the
// default RuntimeImpl (see RuntimeImplFromBuilder above).
absl::Status EnableConstantFolding(RuntimeBuilder& builder,
                                   MemoryManagerRef memory_manager) {
  CEL_ASSIGN_OR_RETURN(RuntimeImpl * runtime_impl,
                       RuntimeImplFromBuilder(builder));
  ABSL_ASSERT(runtime_impl != nullptr);
  // `memory_manager` is handed to the optimizer factory; the optimizer runs
  // as part of program planning on this runtime's expression builder.
  runtime_impl->expr_builder().AddProgramOptimizer(
      runtime_internal::CreateConstantFoldingOptimizer(memory_manager));
  return absl::OkStatus();
}
} | #include "runtime/constant_folding.h"
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "runtime/register_function_helper.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using testing::HasSubstr;
using cel::internal::StatusIs;
using ValueMatcher = testing::Matcher<Value>;
// One parameterized scenario: a CEL expression plus either the value it
// should evaluate to (when `status` is OK) or the expected failure status.
struct TestCase {
  std::string name;
  std::string expression;
  ValueMatcher result_matcher;
  absl::Status status;
};
// Matches an IntValue holding exactly `expected`.
MATCHER_P(IsIntValue, expected, "") {
  const Value& value = arg;
  return value->Is<IntValue>() &&
         value->As<IntValue>().NativeValue() == expected;
}
// Matches a BoolValue holding exactly `expected`.
MATCHER_P(IsBoolValue, expected, "") {
  const Value& value = arg;
  return value->Is<BoolValue>() &&
         value->As<BoolValue>().NativeValue() == expected;
}
// Matches an ErrorValue whose status message contains `expected_substr`.
MATCHER_P(IsErrorValue, expected_substr, "") {
  const Value& value = arg;
  return value->Is<ErrorValue>() &&
         absl::StrContains(value->As<ErrorValue>().NativeValue().message(),
                           expected_substr);
}
// Parameterized over TestCase: each case builds a fresh standard runtime
// with constant folding enabled and evaluates the case's expression.
class ConstantFoldingExtTest : public testing::TestWithParam<TestCase> {};
TEST_P(ConstantFoldingExtTest, Runner) {
  RuntimeOptions options;
  const TestCase& test_case = GetParam();
  ASSERT_OK_AND_ASSIGN(cel::RuntimeBuilder builder,
                       CreateStandardRuntimeBuilder(options));
  // Register a custom "prepend" overload so cases can exercise folding
  // across user-defined functions, not just builtins.
  auto status = RegisterHelper<BinaryFunctionAdapter<
      absl::StatusOr<Value>, const StringValue&, const StringValue&>>::
      RegisterGlobalOverload(
          "prepend",
          [](ValueManager& f, const StringValue& value,
             const StringValue& prefix) {
            return StringValue::Concat(f, prefix, value);
          },
          builder.function_registry());
  ASSERT_OK(status);
  ASSERT_OK(
      EnableConstantFolding(builder, MemoryManagerRef::ReferenceCounting()));
  ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
  ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse(test_case.expression));
  ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
                                         *runtime, parsed_expr));
  ManagedValueFactory value_factory(program->GetTypeProvider(),
                                    MemoryManagerRef::ReferenceCounting());
  Activation activation;
  auto result = program->Evaluate(activation, value_factory.get());
  // Check either the value matcher or the status, depending on the case.
  if (test_case.status.ok()) {
    ASSERT_OK_AND_ASSIGN(Value value, std::move(result));
    EXPECT_THAT(value, test_case.result_matcher);
    return;
  }
  EXPECT_THAT(result.status(), StatusIs(test_case.status.code(),
                                        HasSubstr(test_case.status.message())));
}
INSTANTIATE_TEST_SUITE_P(
Cases, ConstantFoldingExtTest,
testing::ValuesIn(std::vector<TestCase>{
{"sum", "1 + 2 + 3", IsIntValue(6)},
{"list_create", "[1, 2, 3, 4].filter(x, x < 4).size()", IsIntValue(3)},
{"string_concat", "('12' + '34' + '56' + '78' + '90').size()",
IsIntValue(10)},
{"comprehension", "[1, 2, 3, 4].exists(x, x in [4, 5, 6, 7])",
IsBoolValue(true)},
{"nested_comprehension",
"[1, 2, 3, 4].exists(x, [1, 2, 3, 4].all(y, y <= x))",
IsBoolValue(true)},
{"runtime_error", "[1, 2, 3, 4].exists(x, ['4'].all(y, y <= x))",
IsErrorValue("No matching overloads")},
{"custom_function", "prepend('def', 'abc') == 'abcdef'",
IsBoolValue(true)}}),
[](const testing::TestParamInfo<TestCase>& info) {
return info.param.name;
});
}
} |
90 | #ifndef TENSORFLOW_LITE_CORE_ASYNC_C_ASYNC_SIGNATURE_RUNNER_H_
#define TENSORFLOW_LITE_CORE_ASYNC_C_ASYNC_SIGNATURE_RUNNER_H_
#include <stdbool.h>
#include <stdint.h>
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TfLiteAsyncSignatureRunner TfLiteAsyncSignatureRunner;
TFL_CAPI_EXPORT extern TfLiteAsyncSignatureRunner*
TfLiteInterpreterGetAsyncSignatureRunner(const TfLiteInterpreter* interpreter,
const char* signature_key);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerRegisterBufferSlice(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle buffer_pool, const TfLiteAttributeMap* attrs,
TfLiteBufferHandle* handle);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerUnregisterBuffer(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteBufferHandle handle);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* const** types, size_t* num_types);
TFL_CAPI_EXPORT extern bool TfLiteAsyncSignatureRunnerReconcileRestrictions(
const TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteIoType io_type, const char* name,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict);
TFL_CAPI_EXPORT extern bool
TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
const TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributes(
TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
const char* name, const TfLiteAttributeMap* attrs);
TFL_CAPI_EXPORT extern TfLiteStatus
TfLiteAsyncSignatureRunnerSetAttributesByIndex(
TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
const TfLiteAttributeMap* attrs);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerPrepareBackends(
TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern TfLiteExecutionTask*
TfLiteAsyncSignatureRunnerCreateTask(
TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerInvokeAsync(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerWait(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteAsyncSignatureRunnerFinish(
TfLiteAsyncSignatureRunner* async_signature_runner,
TfLiteExecutionTask* task);
TFL_CAPI_EXPORT extern size_t TfLiteAsyncSignatureRunnerGetInputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const char* TfLiteAsyncSignatureRunnerGetInputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t input_index);
TFL_CAPI_EXPORT extern size_t TfLiteAsyncSignatureRunnerGetOutputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const char* TfLiteAsyncSignatureRunnerGetOutputName(
const TfLiteAsyncSignatureRunner* async_signature_runner,
int32_t output_index);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetInputTensor(
TfLiteAsyncSignatureRunner* async_signature_runner, const char* input_name);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetOutputTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner,
const char* output_name);
TFL_CAPI_EXPORT extern void TfLiteAsyncSignatureRunnerDelete(
TfLiteAsyncSignatureRunner* signature_runner);
TFL_CAPI_EXPORT extern const int* TfLiteAsyncSignatureRunnerInputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const int* TfLiteAsyncSignatureRunnerOutputTensorIndices(
const TfLiteAsyncSignatureRunner* async_signature_runner);
TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor*
TfLiteAsyncSignatureRunnerGetTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner, int index);
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/async/async_signature_runner.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
// Returns a heap-allocated wrapper around the interpreter's async signature
// runner for `signature_key`, or nullptr when the interpreter is null or no
// such runner exists. The caller releases the wrapper with
// TfLiteAsyncSignatureRunnerDelete().
TfLiteAsyncSignatureRunner* TfLiteInterpreterGetAsyncSignatureRunner(
    const TfLiteInterpreter* interpreter, const char* signature_key) {
  if (interpreter == nullptr) return nullptr;
  auto* impl_runner = interpreter->impl->GetAsyncSignatureRunner(signature_key);
  if (impl_runner == nullptr) return nullptr;
  return new TfLiteAsyncSignatureRunner{impl_runner};
}
// Registers a backend buffer with the runner; on success `*handle` receives
// the new buffer handle. Returns kTfLiteError on a null runner.
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBuffer(
    TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
    const TfLiteBackendBuffer* buffer, const TfLiteAttributeMap* attrs,
    TfLiteBufferHandle* handle) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->RegisterBuffer(io_type, buffer, attrs,
                                                      handle);
}
// Registers a slice of an already-registered buffer pool identified by
// `buffer_pool`.
TfLiteStatus TfLiteAsyncSignatureRunnerRegisterBufferSlice(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteBufferHandle buffer_pool, const TfLiteAttributeMap* attrs,
    TfLiteBufferHandle* handle) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->RegisterBufferSlice(buffer_pool, attrs,
                                                           handle);
}
// Unregisters a buffer (or slice) previously registered with this runner.
TfLiteStatus TfLiteAsyncSignatureRunnerUnregisterBuffer(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteBufferHandle handle) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->UnregisterBuffer(handle);
}
// Reports the buffer type names supported for `io_type`. `*types` points at
// data owned by the underlying runner. Errors on any null argument.
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteIoType io_type, const char* const** types, size_t* num_types) {
  if (async_signature_runner == nullptr || types == nullptr ||
      num_types == nullptr)
    return kTfLiteError;
  const auto& buffer_types =
      async_signature_runner->impl->SupportedBufferTypes(io_type);
  *types = buffer_types.data();
  *num_types = buffer_types.size();
  return kTfLiteOk;
}
// Reports the synchronization object type names supported for `io_type`.
// Same ownership and error conventions as above.
TfLiteStatus TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteIoType io_type, const char* const** types, size_t* num_types) {
  if (async_signature_runner == nullptr || types == nullptr ||
      num_types == nullptr)
    return kTfLiteError;
  const auto& synchronization_types =
      async_signature_runner->impl->SupportedSynchronizations(io_type);
  *types = synchronization_types.data();
  *num_types = synchronization_types.size();
  return kTfLiteOk;
}
// Reconciles user-provided attributes for the I/O tensor named `name`.
// Returns false on a null runner or when reconciliation fails.
bool TfLiteAsyncSignatureRunnerReconcileRestrictions(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteIoType io_type, const char* name,
    const TfLiteAttributeMap* user_provided_attributes,
    TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
  if (!async_signature_runner) return false;
  return async_signature_runner->impl->ReconcileRestrictions(
      io_type, name, user_provided_attributes, merged, conflict);
}
// Same as above, addressing the tensor by graph index instead of name.
bool TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
    const TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
    const TfLiteAttributeMap* user_provided_attributes,
    TfLiteAttributeMap* merged, TfLiteAttributeMap* conflict) {
  if (!async_signature_runner) return false;
  return async_signature_runner->impl->ReconcileRestrictions(
      tensor_index, user_provided_attributes, merged, conflict);
}
// Applies attributes to the I/O tensor named `name`.
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributes(
    TfLiteAsyncSignatureRunner* async_signature_runner, TfLiteIoType io_type,
    const char* name, const TfLiteAttributeMap* attrs) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->SetAttributes(io_type, name, attrs);
}
// Applies attributes to the tensor identified by graph index.
TfLiteStatus TfLiteAsyncSignatureRunnerSetAttributesByIndex(
    TfLiteAsyncSignatureRunner* async_signature_runner, int tensor_index,
    const TfLiteAttributeMap* attrs) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->SetAttributes(tensor_index, attrs);
}
// Prepares the delegate backends for execution.
TfLiteStatus TfLiteAsyncSignatureRunnerPrepareBackends(
    TfLiteAsyncSignatureRunner* async_signature_runner) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->PrepareBackends();
}
// Creates a new execution task; nullptr on a null runner.
TfLiteExecutionTask* TfLiteAsyncSignatureRunnerCreateTask(
    TfLiteAsyncSignatureRunner* async_signature_runner) {
  if (!async_signature_runner) return nullptr;
  return async_signature_runner->impl->CreateTask();
}
// Schedules asynchronous execution of `task`.
TfLiteStatus TfLiteAsyncSignatureRunnerInvokeAsync(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteExecutionTask* task) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->InvokeAsync(task);
}
// Waits for `task` to complete.
TfLiteStatus TfLiteAsyncSignatureRunnerWait(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteExecutionTask* task) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->Wait(task);
}
// Finishes `task`, releasing its associated resources.
TfLiteStatus TfLiteAsyncSignatureRunnerFinish(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    TfLiteExecutionTask* task) {
  if (!async_signature_runner) return kTfLiteError;
  return async_signature_runner->impl->Finish(task);
}
// Number of signature inputs; 0 for a null runner.
size_t TfLiteAsyncSignatureRunnerGetInputCount(
    const TfLiteAsyncSignatureRunner* async_signature_runner) {
  if (!async_signature_runner) return 0;
  return async_signature_runner->impl->input_size();
}
// Returns the signature name of the input at `input_index`, or nullptr when
// the runner is null, the index is out of range, or the runner was created
// without a signature (in which case the name list may be shorter than the
// input count).
const char* TfLiteAsyncSignatureRunnerGetInputName(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    int32_t input_index) {
  if (!async_signature_runner) return nullptr;
  size_t count =
      TfLiteAsyncSignatureRunnerGetInputCount(async_signature_runner);
  // Reject negatives before casting so the unsigned comparison below is
  // well-defined (fixes the implicit signed/unsigned comparison).
  if (input_index < 0 || static_cast<size_t>(input_index) >= count) {
    return nullptr;
  }
  const auto& input_names = async_signature_runner->impl->input_names();
  if (static_cast<size_t>(input_index) >= input_names.size()) {
    return nullptr;
  }
  return input_names[input_index];
}
size_t TfLiteAsyncSignatureRunnerGetOutputCount(
const TfLiteAsyncSignatureRunner* async_signature_runner) {
if (!async_signature_runner) return 0;
return async_signature_runner->impl->output_size();
}
// Returns the signature name of the output at `output_index`, or nullptr
// when the runner is null, the index is out of range, or no name is
// available for that index.
const char* TfLiteAsyncSignatureRunnerGetOutputName(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    int32_t output_index) {
  if (!async_signature_runner) return nullptr;
  size_t count =
      TfLiteAsyncSignatureRunnerGetOutputCount(async_signature_runner);
  // Reject negatives before casting so the unsigned comparison below is
  // well-defined (fixes the implicit signed/unsigned comparison).
  if (output_index < 0 || static_cast<size_t>(output_index) >= count) {
    return nullptr;
  }
  const auto& output_names = async_signature_runner->impl->output_names();
  if (static_cast<size_t>(output_index) >= output_names.size()) {
    return nullptr;
  }
  // Index the already-fetched list instead of calling output_names() a
  // second time (mirrors TfLiteAsyncSignatureRunnerGetInputName).
  return output_names[output_index];
}
// Looks up an input tensor by its signature name; nullptr when the runner
// is null or no tensor has that name.
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetInputTensor(
    TfLiteAsyncSignatureRunner* async_signature_runner,
    const char* input_name) {
  if (!async_signature_runner) return nullptr;
  return async_signature_runner->impl->input_tensor(input_name);
}
// Looks up an output tensor by its signature name.
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetOutputTensor(
    const TfLiteAsyncSignatureRunner* async_signature_runner,
    const char* output_name) {
  if (!async_signature_runner) return nullptr;
  return async_signature_runner->impl->output_tensor(output_name);
}
// Destroys a wrapper created by TfLiteInterpreterGetAsyncSignatureRunner.
// Accepts nullptr (deleting null is a no-op).
void TfLiteAsyncSignatureRunnerDelete(
    TfLiteAsyncSignatureRunner* signature_runner) {
  delete signature_runner;
}
// Raw graph indices of the signature inputs; the array is owned by the
// underlying runner.
const int* TfLiteAsyncSignatureRunnerInputTensorIndices(
    const TfLiteAsyncSignatureRunner* async_signature_runner) {
  if (!async_signature_runner) return nullptr;
  return async_signature_runner->impl->inputs().data();
}
// Raw graph indices of the signature outputs.
const int* TfLiteAsyncSignatureRunnerOutputTensorIndices(
    const TfLiteAsyncSignatureRunner* async_signature_runner) {
  if (!async_signature_runner) return nullptr;
  return async_signature_runner->impl->outputs().data();
}
const TfLiteOpaqueTensor* TfLiteAsyncSignatureRunnerGetTensor(
const TfLiteAsyncSignatureRunner* async_signature_runner, int index) {
if (!async_signature_runner) return nullptr;
return async_signature_runner->impl->tensor(index);
} | #include "tensorflow/lite/core/async/c/async_signature_runner.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/backend_async_kernel_interface.h"
#include "tensorflow/lite/core/async/c/internal.h"
#include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
#include "tensorflow/lite/core/async/testing/test_backend.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/interpreter_test_util.h"
using ::testing::_;
using ::testing::Return;
namespace tflite {
namespace async {
class AsyncSignatureRunnerTest : public InterpreterTest,
public ::testing::WithParamInterface<bool> {
protected:
void SetUp() override {
kernel_ =
std::make_unique<::testing::StrictMock<testing::MockAsyncKernel>>();
backend_ = std::make_unique<testing::TestBackend>(kernel_->kernel());
auto interpreter = std::make_unique<Interpreter>();
interpreter->AddTensors(2);
interpreter->SetInputs({0});
interpreter->SetOutputs({1});
TfLiteQuantizationParams quant;
interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "x", {3},
quant);
interpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "a", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
interpreter->AddNodeWithParameters({0, 0}, {1}, nullptr, 0, builtin_data_1,
reg);
tflite_interpreter_.impl = std::move(interpreter);
}
void BuildRunner(bool has_signature) {
auto* interpreter = tflite_interpreter_.impl.get();
if (has_signature) {
const char kSignatureKey[] = "serving_default";
BuildSignature(interpreter, kSignatureKey, {{"input", 0}},
{{"output", 1}});
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
kSignatureKey);
} else {
interpreter->ModifyGraphWithDelegate(backend_->get_delegate());
runner_ = TfLiteInterpreterGetAsyncSignatureRunner(&tflite_interpreter_,
nullptr);
}
ASSERT_NE(nullptr, runner_);
}
void TearDown() override { TfLiteAsyncSignatureRunnerDelete(runner_); }
protected:
TfLiteAsyncSignatureRunner* runner_ = nullptr;
std::unique_ptr<::testing::StrictMock<testing::MockAsyncKernel>> kernel_;
std::unique_ptr<testing::TestBackend> backend_;
internal::SignatureDef signature_def_;
TfLiteInterpreter tflite_interpreter_{};
};
INSTANTIATE_TEST_SUITE_P(AsyncSignatureRunnerTest, AsyncSignatureRunnerTest,
::testing::Bool());
TEST_P(AsyncSignatureRunnerTest, RegisterBufferTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, RegisterBuffer(_, _, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, RegisterBufferSlice(_, _, _, _))
.WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, UnregisterBuffer(_, _)).WillOnce(Return(kTfLiteOk));
TfLiteBufferHandle handle;
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
auto* buf = TfLiteBackendBufferCreate();
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBuffer(
runner_, kTfLiteIoTypeInput, buf, attr, &handle));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerRegisterBufferSlice(
runner_, handle, attr, &handle));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerUnregisterBuffer(runner_, handle));
TfLiteAttributeMapDelete(attr);
TfLiteBackendBufferDelete(buf);
}
TEST_P(AsyncSignatureRunnerTest, SupportedTypesTest) {
BuildRunner(GetParam());
const char* const* buffer_types = nullptr;
size_t num_buffer_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedBufferTypes(
runner_, kTfLiteIoTypeInput, &buffer_types, &num_buffer_types));
EXPECT_EQ(1, num_buffer_types);
EXPECT_STREQ("buffer_type", buffer_types[0]);
const char* const* sync_types = nullptr;
size_t num_sync_types = 0;
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerGetSupportedSynchronizationTypes(
runner_, kTfLiteIoTypeInput, &sync_types, &num_sync_types));
EXPECT_EQ(1, num_sync_types);
EXPECT_STREQ("sync_type", sync_types[0]);
}
TEST_P(AsyncSignatureRunnerTest, ReconcileTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_CALL(*kernel_, ReconcileRestrictions(_, _, _, _, _, _))
.WillOnce(Return(true));
EXPECT_CALL(*kernel_, SetAttributes(_, _, _, _)).WillOnce(Return(kTfLiteOk));
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
if (has_signature) {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictions(
runner_, kTfLiteIoTypeInput, "input", attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerSetAttributes(
runner_, kTfLiteIoTypeInput, "input", attr));
} else {
EXPECT_TRUE(TfLiteAsyncSignatureRunnerReconcileRestrictionsByIndex(
runner_, 0, attr, attr, nullptr));
EXPECT_EQ(kTfLiteOk,
TfLiteAsyncSignatureRunnerSetAttributesByIndex(runner_, 0, attr));
}
TfLiteAttributeMapDelete(attr);
}
TEST_P(AsyncSignatureRunnerTest, ExecutionTest) {
BuildRunner(GetParam());
EXPECT_CALL(*kernel_, Prepare(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Eval(_, _, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Wait(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_CALL(*kernel_, Finish(_, _)).WillOnce(Return(kTfLiteOk));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerPrepareBackends(runner_));
auto* task = TfLiteAsyncSignatureRunnerCreateTask(runner_);
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerInvokeAsync(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerWait(runner_, task));
EXPECT_EQ(kTfLiteOk, TfLiteAsyncSignatureRunnerFinish(runner_, task));
}
TEST_P(AsyncSignatureRunnerTest, InputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
if (has_signature) {
EXPECT_STREQ("input", TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_STREQ(
"x", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "input")));
} else {
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetInputName(runner_, 0));
EXPECT_EQ(nullptr,
TfLiteAsyncSignatureRunnerGetInputTensor(runner_, "input"));
}
}
TEST_P(AsyncSignatureRunnerTest, OutputsTest) {
bool has_signature = GetParam();
BuildRunner(has_signature);
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
if (has_signature) {
EXPECT_STREQ("output", TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_STREQ(
"a", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "output")));
} else {
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetOutputName(runner_, 0));
EXPECT_EQ(nullptr,
TfLiteAsyncSignatureRunnerGetOutputTensor(runner_, "output"));
}
}
TEST_P(AsyncSignatureRunnerTest, InputByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetInputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerInputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
auto indice = indices[0];
EXPECT_STREQ("x", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetTensor(runner_, indice)));
}
TEST_P(AsyncSignatureRunnerTest, OutputsByIndexTest) {
BuildRunner(GetParam());
EXPECT_EQ(1, TfLiteAsyncSignatureRunnerGetOutputCount(runner_));
auto* indices = TfLiteAsyncSignatureRunnerOutputTensorIndices(runner_);
EXPECT_NE(nullptr, indices);
auto indice = indices[0];
EXPECT_STREQ("a", TfLiteOpaqueTensorName(
TfLiteAsyncSignatureRunnerGetTensor(runner_, indice)));
}
TEST_P(AsyncSignatureRunnerTest, IndexOutOfBound) {
BuildRunner(GetParam());
EXPECT_EQ(nullptr, TfLiteAsyncSignatureRunnerGetTensor(runner_, 42));
}
}
} |
91 | #ifndef ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_
#define ABSL_CRC_INTERNAL_CRC_CORD_STATE_H_
#include <atomic>
#include <cstddef>
#include <deque>
#include "absl/base/config.h"
#include "absl/crc/crc32c.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace crc_internal {
// Tracks a running CRC32C over a chunked byte sequence (as used by
// absl::Cord), with a reference-counted, copy-on-write representation so
// copies of the state are cheap.
class CrcCordState {
 public:
  CrcCordState();
  CrcCordState(const CrcCordState&);
  CrcCordState(CrcCordState&&);
  ~CrcCordState();
  CrcCordState& operator=(const CrcCordState&);
  CrcCordState& operator=(CrcCordState&&);
  // CRC of a prefix of the data: `crc` covers the first `length` bytes.
  struct PrefixCrc {
    PrefixCrc() = default;
    PrefixCrc(size_t length_arg, absl::crc32c_t crc_arg)
        : length(length_arg), crc(crc_arg) {}
    size_t length = 0;
    absl::crc32c_t crc = absl::crc32c_t{0};
  };
  // Shared representation: per-chunk prefix CRCs plus the prefix that has
  // been logically removed from the front (see IsNormalized()).
  struct Rep {
    PrefixCrc removed_prefix;
    std::deque<PrefixCrc> prefix_crc;
  };
  // Read-only access to the (possibly shared) representation.
  const Rep& rep() const { return refcounted_rep_->rep; }
  // Mutable access; clones the representation first when it is shared
  // (copy-on-write), so a writer never disturbs other owners.
  Rep* mutable_rep() {
    if (refcounted_rep_->count.load(std::memory_order_acquire) != 1) {
      RefcountedRep* copy = new RefcountedRep;
      copy->rep = refcounted_rep_->rep;
      Unref(refcounted_rep_);
      refcounted_rep_ = copy;
    }
    return &refcounted_rep_->rep;
  }
  absl::crc32c_t Checksum() const;
  // Normalized means no removed prefix is outstanding.
  bool IsNormalized() const { return rep().removed_prefix.length == 0; }
  void Normalize();
  // Number of chunks with recorded prefix CRCs.
  size_t NumChunks() const { return rep().prefix_crc.size(); }
  PrefixCrc NormalizedPrefixCrcAtNthChunk(size_t n) const;
  void Poison();
 private:
  struct RefcountedRep {
    std::atomic<int32_t> count{1};
    Rep rep;
  };
  // Returns the process-wide shared empty rep with one reference added.
  static RefcountedRep* RefSharedEmptyRep();
  static void Ref(RefcountedRep* r) {
    assert(r != nullptr);
    r->count.fetch_add(1, std::memory_order_relaxed);
  }
  // Drops one reference and deletes on the last release. acq_rel ordering
  // makes prior writes visible to the thread that performs the delete.
  static void Unref(RefcountedRep* r) {
    assert(r != nullptr);
    if (r->count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      delete r;
    }
  }
  RefcountedRep* refcounted_rep_;
};
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/crc/internal/crc_cord_state.h"
#include <cassert>
#include "absl/base/config.h"
#include "absl/base/no_destructor.h"
#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace crc_internal {
// Returns the singleton empty representation with its refcount incremented,
// so the caller owns one reference.  NoDestructor keeps it alive for the
// whole process, so its refcount never needs to reach zero.
CrcCordState::RefcountedRep* CrcCordState::RefSharedEmptyRep() {
  static absl::NoDestructor<CrcCordState::RefcountedRep> empty;
  assert(empty->count.load(std::memory_order_relaxed) >= 1);
  assert(empty->rep.removed_prefix.length == 0);
  assert(empty->rep.prefix_crc.empty());
  Ref(empty.get());
  return empty.get();
}
// Default construction allocates a fresh, unshared empty representation.
CrcCordState::CrcCordState() : refcounted_rep_(new RefcountedRep) {}
// Copying shares the representation and bumps its refcount (copy-on-write).
CrcCordState::CrcCordState(const CrcCordState& other)
    : refcounted_rep_(other.refcounted_rep_) {
  Ref(refcounted_rep_);
}
// Moving steals the representation; the moved-from state is repointed at
// the shared empty rep so it stays valid and safely destructible.
CrcCordState::CrcCordState(CrcCordState&& other)
    : refcounted_rep_(other.refcounted_rep_) {
  other.refcounted_rep_ = RefSharedEmptyRep();
}
CrcCordState& CrcCordState::operator=(const CrcCordState& other) {
  // Self-assignment check also prevents Unref from freeing the rep we are
  // about to re-reference when the refcount is 1.
  if (this != &other) {
    Unref(refcounted_rep_);
    refcounted_rep_ = other.refcounted_rep_;
    Ref(refcounted_rep_);
  }
  return *this;
}
CrcCordState& CrcCordState::operator=(CrcCordState&& other) {
  if (this != &other) {
    Unref(refcounted_rep_);
    refcounted_rep_ = other.refcounted_rep_;
    // Leave `other` valid by pointing it at the shared empty rep.
    other.refcounted_rep_ = RefSharedEmptyRep();
  }
  return *this;
}
CrcCordState::~CrcCordState() {
  Unref(refcounted_rep_);
}
// Returns the CRC32C of the entire sequence.  When a prefix has been
// removed, its contribution is stripped from the final prefix CRC.
crc32c_t CrcCordState::Checksum() const {
  const Rep& r = rep();
  if (r.prefix_crc.empty()) {
    return absl::crc32c_t{0};
  }
  const PrefixCrc& last = r.prefix_crc.back();
  if (IsNormalized()) {
    return last.crc;
  }
  return absl::RemoveCrc32cPrefix(r.removed_prefix.crc, last.crc,
                                  last.length - r.removed_prefix.length);
}
// Returns the n-th chunk's prefix CRC as if the state were normalized,
// leaving the representation untouched.  Requires n < NumChunks().
CrcCordState::PrefixCrc CrcCordState::NormalizedPrefixCrcAtNthChunk(
    size_t n) const {
  assert(n < NumChunks());
  const Rep& r = rep();
  const PrefixCrc& nth = r.prefix_crc[n];
  if (IsNormalized()) {
    return nth;
  }
  // Strip the removed prefix's bytes and CRC contribution on the fly.
  const size_t normalized_length = nth.length - r.removed_prefix.length;
  return PrefixCrc(normalized_length,
                   absl::RemoveCrc32cPrefix(r.removed_prefix.crc, nth.crc,
                                            normalized_length));
}
// Folds the removed prefix into each stored prefix CRC so the state no
// longer carries a removed prefix.  No-op when already normalized or when
// there are no chunks.
void CrcCordState::Normalize() {
  if (IsNormalized() || rep().prefix_crc.empty()) {
    return;
  }
  // mutable_rep() may clone the representation if it is shared.
  Rep* r = mutable_rep();
  for (auto& prefix_crc : r->prefix_crc) {
    size_t remaining = prefix_crc.length - r->removed_prefix.length;
    prefix_crc.crc = absl::RemoveCrc32cPrefix(r->removed_prefix.crc,
                                              prefix_crc.crc, remaining);
    prefix_crc.length = remaining;
  }
  // Clear the removed prefix now that it has been folded in.
  r->removed_prefix = PrefixCrc();
}
// Scrambles the stored CRCs so later use of this state is detectably
// wrong.  Used to invalidate checksums that no longer match the data.
void CrcCordState::Poison() {
  Rep* rep = mutable_rep();
  if (NumChunks() > 0) {
    for (auto& prefix_crc : rep->prefix_crc) {
      uint32_t crc = static_cast<uint32_t>(prefix_crc.crc);
      // Arbitrary add-and-rotate perturbation of each stored CRC.
      crc += 0x2e76e41b;
      crc = absl::rotr(crc, 17);
      prefix_crc.crc = crc32c_t{crc};
    }
  } else {
    // No chunks: insert a sentinel chunk with a non-zero CRC.
    rep->prefix_crc.emplace_back(0, crc32c_t{1});
  }
}
}
ABSL_NAMESPACE_END
} | #include "absl/crc/internal/crc_cord_state.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include "gtest/gtest.h"
#include "absl/crc/crc32c.h"
namespace {
// A default-constructed state is normalized with a zero checksum, and
// Normalize() on it is a no-op.
TEST(CrcCordState, Default) {
  absl::crc_internal::CrcCordState state;
  EXPECT_TRUE(state.IsNormalized());
  EXPECT_EQ(state.Checksum(), absl::crc32c_t{0});
  state.Normalize();
  EXPECT_EQ(state.Checksum(), absl::crc32c_t{0});
}
// Normalizing folds the removed prefix into the stored prefix CRCs without
// changing the overall checksum.
TEST(CrcCordState, Normalize) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000}));
  rep->removed_prefix =
      absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500});
  EXPECT_FALSE(state.IsNormalized());
  absl::crc32c_t crc = state.Checksum();
  state.Normalize();
  EXPECT_TRUE(state.IsNormalized());
  EXPECT_EQ(state.Checksum(), crc);
  // `rep` is still valid here because the state is unshared, so
  // mutable_rep() inside Normalize() did not reallocate the Rep.
  EXPECT_EQ(rep->removed_prefix.length, 0);
}
// Copies share the same checksum as the original.
TEST(CrcCordState, Copy) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  absl::crc_internal::CrcCordState copy = state;
  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
  EXPECT_EQ(copy.Checksum(), absl::crc32c_t{1000});
}
// Self-copy-assignment through a reference must be a safe no-op.
TEST(CrcCordState, UnsharedSelfCopy) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  const absl::crc_internal::CrcCordState& ref = state;
  state = ref;
  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
}
// Move transfers the checksum to the destination.
TEST(CrcCordState, Move) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  absl::crc_internal::CrcCordState moved = std::move(state);
  EXPECT_EQ(moved.Checksum(), absl::crc32c_t{1000});
}
// Self-move-assignment must leave the state intact.
TEST(CrcCordState, UnsharedSelfMove) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  absl::crc_internal::CrcCordState& ref = state;
  state = std::move(ref);
  EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000});
}
// Poisoning an empty state must still yield a non-zero checksum (a
// sentinel chunk is inserted).
TEST(CrcCordState, PoisonDefault) {
  absl::crc_internal::CrcCordState state;
  state.Poison();
  EXPECT_NE(state.Checksum(), absl::crc32c_t{0});
}
// Poisoning a populated (non-normalized) state must change its checksum.
TEST(CrcCordState, PoisonData) {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000}));
  rep->removed_prefix =
      absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500});
  absl::crc32c_t crc = state.Checksum();
  state.Poison();
  EXPECT_NE(state.Checksum(), crc);
}
} |
#ifndef TENSORSTORE_INTERNAL_RIEGELI_FIND_H_
#define TENSORSTORE_INTERNAL_RIEGELI_FIND_H_
#include <string_view>
#include "riegeli/bytes/reader.h"
namespace tensorstore {
namespace internal {
// Returns true if the bytes at the reader's current position begin with
// `needle`.  Pulls `needle.size()` bytes but does not advance the cursor.
bool StartsWith(riegeli::Reader &reader, std::string_view needle);
// Positions the reader at the first occurrence of `needle` at or after the
// current position.  Returns false (reader advanced to the end) when the
// needle is not found.
bool FindFirst(riegeli::Reader &reader, std::string_view needle);
// Positions the reader at the last occurrence of `needle` at or after the
// current position.  Returns false when the needle is not found.
bool FindLast(riegeli::Reader &reader, std::string_view needle);
}
}
#endif
#include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cstring>
#include <optional>
#include <string_view>
#include "riegeli/bytes/reader.h"
namespace tensorstore {
namespace internal {
// Reports whether the data at the reader's current position starts with
// `needle`.  The cursor is not advanced past the bytes pulled into the
// buffer.
bool StartsWith(riegeli::Reader &reader, std::string_view needle) {
  if (!reader.ok()) return false;
  if (!reader.Pull(needle.size())) return false;
  return memcmp(reader.cursor(), needle.data(), needle.size()) == 0;
}
// Advances the reader to the first occurrence of `needle` at or after the
// current position.  Returns true with the cursor on the match, or false
// once the reader is exhausted.
bool FindFirst(riegeli::Reader &reader, std::string_view needle) {
  while (true) {
    // Ensure at least needle.size() bytes are buffered; fails at EOF.
    if (!reader.Pull(needle.size())) break;
    auto end = reader.cursor() + reader.available();
    auto pos = std::search(reader.cursor(), end, needle.begin(), needle.end());
    if (pos != end) {
      reader.move_cursor(pos - reader.cursor());
      return true;
    }
    // No match in this buffer: advance, keeping the last needle.size()-1
    // bytes so a match straddling the buffer boundary is still found.
    reader.move_cursor(1 + reader.available() - needle.size());
  }
  return false;
}
// Positions the reader at the last occurrence of `needle`.  Uses a single
// whole-buffer rfind when the reader knows its size; otherwise scans
// forward, remembering the position of the last match.
bool FindLast(riegeli::Reader &reader, std::string_view needle) {
  if (reader.SupportsSize()) {
    // Fast path: pull the whole stream into the buffer and rfind.
    auto size = reader.Size();
    if (size && reader.Pull(*size)) {
      auto found_pos = std::string_view(reader.cursor(), *size).rfind(needle);
      if (found_pos == std::string_view::npos) return false;
      return reader.Seek(found_pos + reader.pos());
    }
  }
  // Slow path: forward scan recording the most recent match position.
  std::optional<uint64_t> found;
  while (reader.ok()) {
    // NOTE(review): the loop only tests positions while strictly more than
    // needle.size() bytes are buffered; confirm the final candidate at the
    // very end of the stream is covered by the Pull below.
    for (size_t available = reader.available(); available > needle.size();
         available = reader.available()) {
      if (memcmp(reader.cursor(), needle.data(), needle.size()) == 0) {
        found = reader.pos();
      }
      // Jump to the next byte matching needle[0] (next candidate).
      const char *pos = static_cast<const char *>(
          memchr(reader.cursor() + 1, needle[0], available - 1));
      if (pos == nullptr) {
        reader.move_cursor(available);
        break;
      }
      reader.move_cursor(pos - reader.cursor());
    }
    // Refill so a match straddling the buffer boundary can be seen.
    if (!reader.Pull(needle.size() - reader.available())) break;
  }
  return found.has_value() && reader.Seek(*found);
}
}
} | #include "tensorstore/internal/riegeli/find.h"
#include <stddef.h>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "riegeli/bytes/string_reader.h"
namespace {
using ::tensorstore::internal::FindFirst;
using ::tensorstore::internal::FindLast;
using ::tensorstore::internal::StartsWith;
static constexpr unsigned char kData[] = {
0x17, 0x16, 0xa1, 0xcb, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
0x03, 0x04, 0xbb, 0xcc, 0xc7, 0xb6, 0xbe, 0x5d, 0x7c, 0x2d, 0x23, 0x44,
0xa0, 0xbe, 0x13, 0x1b, 0x9a, 0x2d, 0xf2, 0x13, 0x6a, 0xfb, 0xad, 0xdb,
0x73, 0xf9, 0x3d, 0xbc, 0x5d, 0x7c, 0x6f, 0x41, 0xc0, 0xad, 0xf3, 0x31,
0x79, 0x7f, 0x89, 0xb2, 0xe4, 0xa9, 0xf5, 0x9d, 0xc0, 0x30, 0x23, 0x32,
0x99, 0x2c, 0x16, 0x42, 0xf5, 0x48, 0xd1, 0x79, 0xdb, 0x98, 0xb9, 0xc3,
0x6c, 0xa6, 0x50, 0xcd, 0x86, 0xb6, 0xd3, 0xa7, 0x57, 0x3b, 0xe6, 0x1d,
0xa5, 0xe2, 0x79, 0xe9, 0x2d, 0x19, 0xec, 0xa6, 0xf3, 0xa3, 0x50, 0x65,
0x03, 0x04, 0xbb, 0xcc, 0x1a, 0xc9, 0xec, 0xb2, 0xa6, 0x3e, 0xe0, 0x49,
0x6a, 0x30, 0xd7, 0x1f, 0x90, 0x08, 0x1c, 0x2a, 0x6b, 0xbd, 0x06, 0x9c,
0xef, 0xd2, 0x79, 0x20, 0x64, 0xbc, 0xb7, 0x75, 0xbb, 0xcd, 0xcc, 0xa8,
0x49, 0x8b, 0x30, 0x4f, 0x73, 0x7c, 0xb5, 0x6e, 0x08, 0x1b, 0xc2, 0x7f,
0xfb, 0xb1, 0xc4, 0x49, 0x89, 0x74, 0xe7, 0x8e, 0x9d, 0x6f, 0x44, 0x14,
0xbd, 0xdc, 0x6a, 0xd9, 0xcb, 0x53, 0x2b, 0xdc, 0x48, 0x6c, 0xa3, 0x14,
0x4e, 0xc0, 0x3b, 0x6b, 0x47, 0x50, 0xd5, 0x97, 0x84, 0x30, 0xd5, 0x28,
0x03, 0x04, 0xbb, 0xcc, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0xfe, 0xff,
};
constexpr const unsigned char kLiteral1[4] = {0x03, 0x04, 0xbb, 0xcc};
constexpr const unsigned char kLiteral2[3] = {0xff, 0xfe, 0xff};
// kLiteral1 appears three times in kData (offsets 12, 96, 180); FindFirst
// must locate each in order, then fail once all are consumed.
TEST(FindTest, FindFirst) {
  const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
                                  sizeof(kLiteral1));
  const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
                                  sizeof(kLiteral2));
  riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
                                      sizeof(kData));
  size_t positions[3] = {0, 0, 0};
  for (int i = 0; i < 3; ++i) {
    EXPECT_TRUE(FindFirst(string_reader, literal1));
    EXPECT_TRUE(StartsWith(string_reader, literal1));
    positions[i] = string_reader.pos();
    // Skip past this occurrence so the next search finds the next one.
    string_reader.Skip(sizeof(kLiteral1));
  }
  EXPECT_FALSE(FindFirst(string_reader, literal1));
  EXPECT_THAT(positions, ::testing::ElementsAre(12, 96, 180));
  string_reader.Seek(0);
  EXPECT_TRUE(FindFirst(string_reader, literal2));
  EXPECT_THAT(string_reader.pos(), 9);
}
// FindLast must land on the final occurrence of each literal (kLiteral1 at
// offset 180, kLiteral2 at offset 189).
TEST(FindTest, FindLast) {
  const std::string_view literal1(reinterpret_cast<const char*>(kLiteral1),
                                  sizeof(kLiteral1));
  const std::string_view literal2(reinterpret_cast<const char*>(kLiteral2),
                                  sizeof(kLiteral2));
  riegeli::StringReader string_reader(reinterpret_cast<const char*>(kData),
                                      sizeof(kData));
  EXPECT_TRUE(FindLast(string_reader, literal1));
  EXPECT_TRUE(StartsWith(string_reader, literal1));
  EXPECT_THAT(string_reader.pos(), 180);
  string_reader.Seek(0);
  EXPECT_TRUE(FindLast(string_reader, literal2));
  EXPECT_THAT(string_reader.pos(), 189);
}
93 | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_STREAM_WRITER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_STREAM_WRITER_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Default cap on the size of a single snapshot chunk.
constexpr ByteSize kDefaultMaxChunkSize = ByteSize::GB(6);
// Default upper bound on the time between iterator checkpoints.
constexpr absl::Duration kDefaultCheckpointInterval = absl::Minutes(30);
// Static configuration for one snapshot stream writer.
struct SnapshotWriterParams {
  // Root directory of the snapshot being written.
  std::string snapshot_path;
  // Index of this stream within the snapshot.
  int64_t stream_index = 0;
  // Compression applied to chunk and checkpoint TFRecord files.
  std::string compression;
  // Environment used for all filesystem and thread operations.
  Env* env = nullptr;
  ByteSize max_chunk_size = kDefaultMaxChunkSize;
  absl::Duration checkpoint_interval = kDefaultCheckpointInterval;
  // Test-only hook: when true, temp files and checkpoints are kept.
  bool test_only_keep_temp_files = false;
  // Directory holding all files for this stream.
  std::string StreamDirectory() const {
    return tensorflow::data::StreamDirectory(snapshot_path, stream_index);
  }
  // Snapshot-wide directory of committed (finalized) chunks.
  std::string CommittedChunksDirectory() const {
    return tensorflow::data::CommittedChunksDirectory(snapshot_path);
  }
  // Per-stream directory of chunks still being written.
  std::string UncommittedChunksDirectory() const {
    return tensorflow::data::UncommittedChunksDirectory(snapshot_path,
                                                        stream_index);
  }
  // Per-stream directory of iterator checkpoints.
  std::string CheckpointsDirectory() const {
    return tensorflow::data::CheckpointsDirectory(snapshot_path, stream_index);
  }
  // Human-readable summary for logging.
  std::string DebugString() const {
    return absl::Substitute(
        "SnapshotWriterParams { base_path: $0, stream: $1, compression: $2 }",
        snapshot_path, stream_index, compression);
  }
};
// Writes one stream of a distributed tf.data snapshot on a background
// thread: pulls elements from `iterator`, writes them as chunk files,
// periodically checkpoints the iterator, and can resume from the last
// checkpoint after a restart.
class SnapshotStreamWriter {
 public:
  // Starts the background write immediately.
  explicit SnapshotStreamWriter(const SnapshotWriterParams& params,
                                std::unique_ptr<TaskIterator> iterator);
  virtual ~SnapshotStreamWriter() = default;
  SnapshotStreamWriter(const SnapshotStreamWriter&) = delete;
  SnapshotStreamWriter& operator=(const SnapshotStreamWriter&) = delete;
  // True once the stream finished; an error status if it failed.
  absl::StatusOr<bool> Completed() const;
  // Blocks until the writer thread finishes, then returns its outcome.
  absl::StatusOr<bool> Wait();
  // Requests cancellation; the writer stops at the next check.
  void Cancel();
 private:
  // Thread body: writes the stream and records the result.
  void WriteSnapshotAndLog();
  // Restores prior progress then writes chunks until done/cancelled.
  absl::Status WriteSnapshot();
  // True if a previous run already wrote this stream's DONE file.
  bool StreamAlreadyCompleted() const;
  absl::Status InitializeDirectories();
  bool ShouldWriteChunks() const;
  // Writes one batch of chunk files and commits them.
  absl::Status WriteChunks();
  bool ShouldWriteRecord() const;
  absl::Status WriteRecord(ParallelTFRecordWriter& writer);
  // Checkpoints, then renames finished chunk files into the committed dir.
  absl::Status Commit(const ParallelTFRecordWriter::FileToStatsMap& file_stats);
  absl::Status FinalizeStream(absl::Status status);
  absl::Status WriteDoneFile();
  absl::Status WriteErrorFile(const absl::Status& status);
  // Writes an iterator checkpoint and prunes older ones.
  absl::Status Save(const ParallelTFRecordWriter::FileToStatsMap& file_stats);
  absl::Status DeleteOutdatedCheckpoints(int64_t checkpoint_index);
  absl::Status DeleteCheckpoints();
  // Restores the iterator from the newest checkpoint, if any.
  absl::Status Restore();
  absl::StatusOr<std::string> LastCheckpointName() const;
  // Reconciles uncommitted chunk files with the restored checkpoint.
  absl::Status SyncCheckpointWithChunks(std::optional<int64_t> checkpoint_index,
                                        int64_t checkpoint_num_elements);
  absl::StatusOr<int64_t> LastCommittedChunkIndex();
  std::string CheckpointPath(int64_t chunk_index,
                             int64_t chunk_num_elements) const;
  std::string CheckpointPath(const std::string& checkpoint_name) const;
  const SnapshotWriterParams params_;
  // Source of elements; released once the stream completes.
  std::unique_ptr<TaskIterator> iterator_;
  // Index of the next chunk to commit.
  int64_t chunk_index_ = 0;
  // Time of the most recent commit; drives the checkpoint cadence.
  absl::Time last_commit_time_ = absl::Now();
  bool end_of_sequence_ = false;
  mutable mutex mu_;
  // false while running, true on success, error status on failure.
  absl::StatusOr<bool> completed_ TF_GUARDED_BY(mu_) = false;
  std::unique_ptr<Thread> snapshot_thread_;
};
}
}
#endif
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
// Output buffer size used when reading checkpoint TFRecords back.
constexpr ByteSize kTFRecordReaderOutputBufferSize = ByteSize::GB(1);
// Sentinel for "element count unknown" in committed chunk filenames.
constexpr int64_t kUnknownNumElements = -1;
// Separator between the chunk prefix and the shard suffix in uncommitted
// chunk filenames.
constexpr const char kFileShardDelimiter[] = "_CHUNK_SHARDS_";
// Parses the chunk index out of an uncommitted chunk filename of the form
// "chunk_<index>_CHUNK_SHARDS_<suffix>".
absl::StatusOr<int64_t> GetUncommittedChunkIndex(const std::string& filename) {
  std::vector<std::string> shard_tokens =
      absl::StrSplit(filename, kFileShardDelimiter);
  if (shard_tokens.size() != 2) {
    return absl::InternalError(
        absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
                     ". Expected sharded chunk files."));
  }
  std::vector<std::string> name_tokens = absl::StrSplit(shard_tokens[0], '_');
  int64_t chunk_index = 0;
  const bool parsed = name_tokens.size() == 2 && name_tokens[0] == "chunk" &&
                      absl::SimpleAtoi(name_tokens[1], &chunk_index) &&
                      chunk_index >= 0;
  if (!parsed) {
    return absl::InternalError(
        absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
                     ". Expected chunk_<chunk_index>."));
  }
  return chunk_index;
}
// Sums the record counts across all chunk files in `file_stats`.
size_t TotalNumElements(
    const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
  size_t total = 0;
  for (const auto& [unused_file, stats] : file_stats) {
    total += stats.num_records;
  }
  return total;
}
// Sums the estimated on-disk sizes across all chunk files in `file_stats`.
ByteSize TotalBytes(const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
  ByteSize total;
  for (const auto& [unused_file, stats] : file_stats) {
    total += stats.estimated_size;
  }
  return total;
}
}
// Records the construction time as the initial "commit" time and launches
// the background thread that performs the entire stream write.
SnapshotStreamWriter::SnapshotStreamWriter(
    const SnapshotWriterParams& params, std::unique_ptr<TaskIterator> iterator)
    : params_(params), iterator_(std::move(iterator)) {
  DCHECK_NE(iterator_.get(), nullptr);
  last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
  snapshot_thread_ = absl::WrapUnique(params_.env->StartThread(
      {}, "tf_data_service_snapshot_thread",
      [this]() { WriteSnapshotAndLog(); }));
}
// Background-thread entry point: writes the stream, then records the
// outcome in completed_.  On preemption it returns without touching
// completed_, so a restarted worker can resume the stream.
void SnapshotStreamWriter::WriteSnapshotAndLog() TF_LOCKS_EXCLUDED(mu_) {
  if (StreamAlreadyCompleted()) {
    LOG(INFO) << "Distributed tf.data snapshot stream has already been "
              << "completed for " << params_.DebugString();
    mutex_lock l(mu_);
    completed_ = true;
    return;
  }
  LOG(INFO) << "Writing distributed tf.data snapshot stream: "
            << params_.DebugString();
  absl::Status status = WriteSnapshot();
  if (IsPreemptedError(status)) {
    LOG(INFO) << "tf.data service snapshot writer is cancelled: " << status;
    return;
  }
  // Writes the DONE/ERROR file and cleans up checkpoints.
  status = FinalizeStream(status);
  mutex_lock l(mu_);
  if (!status.ok()) {
    LOG(ERROR) << "Failed to write distributed tf.data snapshot stream: "
               << params_.DebugString() << ". Status: " << status;
    completed_ = std::move(status);
    return;
  }
  LOG(INFO) << "Finished writing distributed tf.data snapshot stream: "
            << params_.DebugString();
  completed_ = true;
  // Release the iterator (and its resources) once the stream is done.
  iterator_ = nullptr;
}
// Restores any previous progress, then writes chunk batches until the
// iterator is exhausted, the writer is cancelled, or an error occurs.
absl::Status SnapshotStreamWriter::WriteSnapshot() TF_LOCKS_EXCLUDED(mu_) {
  TF_RETURN_IF_ERROR(InitializeDirectories());
  TF_RETURN_IF_ERROR(Restore());
  while (ShouldWriteChunks()) {
    TF_RETURN_IF_ERROR(WriteChunks());
  }
  // Surface any cancellation/failure recorded in completed_ (e.g. Cancel()).
  mutex_lock l(mu_);
  return completed_.status();
}
// The presence of this stream's DONE file means a previous run finished it.
bool SnapshotStreamWriter::StreamAlreadyCompleted() const {
  std::string done_file_path =
      StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
  return params_.env->FileExists(done_file_path).ok();
}
// Creates the per-stream working directories (idempotent).
absl::Status SnapshotStreamWriter::InitializeDirectories() {
  TF_RETURN_IF_ERROR(
      params_.env->RecursivelyCreateDir(params_.UncommittedChunksDirectory()));
  TF_RETURN_IF_ERROR(
      params_.env->RecursivelyCreateDir(params_.CheckpointsDirectory()));
  return absl::OkStatus();
}
// Keep writing batches while the iterator has data and no cancellation or
// error has been recorded.
bool SnapshotStreamWriter::ShouldWriteChunks() const TF_LOCKS_EXCLUDED(mu_) {
  mutex_lock l(mu_);
  return !end_of_sequence_ && completed_.ok();
}
// Writes one batch of sharded chunk files into the uncommitted directory,
// then checkpoints the iterator and commits (renames) the finished files.
absl::Status SnapshotStreamWriter::WriteChunks() {
  LOG(INFO) << "Writing distributed tf.data snapshot " << params_.snapshot_path
            << ", stream " << params_.stream_index << ", chunk " << chunk_index_
            << ".";
  // Shard files are named chunk_<index>_CHUNK_SHARDS_<suffix>.
  std::string chunks_prefix = tsl::io::JoinPath(
      params_.UncommittedChunksDirectory(),
      absl::StrCat("chunk_", chunk_index_, kFileShardDelimiter));
  ParallelTFRecordWriter writer(TranslateFileName(chunks_prefix),
                                params_.compression, params_.env,
                                params_.max_chunk_size);
  // Write at least one record per batch, then as many as the checkpoint
  // cadence allows.
  do {
    TF_RETURN_IF_ERROR(WriteRecord(writer));
  } while (ShouldWriteRecord());
  TF_ASSIGN_OR_RETURN(const ParallelTFRecordWriter::FileToStatsMap file_stats,
                      writer.Finalize());
  // Abort the commit if the writer was cancelled or failed meanwhile.
  TF_RETURN_IF_ERROR(Completed().status());
  TF_RETURN_IF_ERROR(Commit(file_stats));
  metrics::RecordTFDataServiceSnapshotBytesCommitted(
      TotalBytes(file_stats).ToUnsignedBytes());
  return absl::OkStatus();
}
// Keep adding records to the current batch until the commit deadline.  The
// effective interval starts at 5 minutes and grows by 30 seconds per
// committed chunk, capped by params_.checkpoint_interval, so early chunks
// commit quickly while steady-state chunks amortize checkpoint cost.
bool SnapshotStreamWriter::ShouldWriteRecord() const {
  mutex_lock l(mu_);
  if (!completed_.ok() || end_of_sequence_) {
    return false;
  }
  const absl::Time now = absl::FromUnixMicros(params_.env->NowMicros());
  const absl::Duration adjusted_checkpoint_interval = std::min(
      params_.checkpoint_interval, absl::Minutes(0.5 * chunk_index_ + 5));
  return now < last_commit_time_ + adjusted_checkpoint_interval;
}
// Pulls one element from the iterator and hands it to `writer`.  Sets
// end_of_sequence_ (writing nothing) once the iterator is exhausted.
absl::Status SnapshotStreamWriter::WriteRecord(ParallelTFRecordWriter& writer) {
  std::vector<Tensor> record;
  TF_RETURN_IF_ERROR(iterator_->GetNext(record, end_of_sequence_));
  return end_of_sequence_ ? absl::OkStatus() : writer.Write(std::move(record));
}
// Checkpoints the iterator, then atomically publishes the finished chunk
// files by renaming them into the committed-chunks directory.  Checkpoint
// first: on restart, committed chunks past the checkpoint are discarded.
absl::Status SnapshotStreamWriter::Commit(
    const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
  TF_RETURN_IF_ERROR(Save(file_stats));
  for (const auto& [file, stats] : file_stats) {
    // Committed name encodes <stream>_<chunk>_<num_records>.
    std::string committed_chunk_path =
        tsl::io::JoinPath(params_.CommittedChunksDirectory(),
                          absl::StrCat("chunk_", params_.stream_index, "_",
                                       chunk_index_++, "_", stats.num_records));
    TF_RETURN_IF_ERROR(params_.env->RenameFile(file, committed_chunk_path));
  }
  last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
  return absl::OkStatus();
}
// Marks the stream done (DONE file) on success or records the failure
// (ERROR file) otherwise, then best-effort deletes the checkpoints.  The
// original status is returned; cleanup failures are only logged.
absl::Status SnapshotStreamWriter::FinalizeStream(absl::Status status) {
  if (status.ok()) {
    status = WriteDoneFile();
  }
  if (!status.ok()) {
    // Best-effort: the primary failure is what we report.
    WriteErrorFile(status).IgnoreError();
  }
  absl::Status s = DeleteCheckpoints();
  if (!s.ok()) {
    LOG(ERROR) << "Failed to clean up checkpoints at "
               << params_.CheckpointsDirectory() << ": " << s;
  }
  return status;
}
// Marks this stream as successfully finished by atomically writing an
// empty DONE file for the stream.
absl::Status SnapshotStreamWriter::WriteDoneFile() {
  const std::string done_file =
      StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
  return AtomicallyWriteStringToFile(done_file, "", params_.env);
}
// Records a fatal stream failure by atomically writing the status text to
// an ERROR file in the stream directory.
absl::Status SnapshotStreamWriter::WriteErrorFile(const absl::Status& status) {
  const std::string error_file =
      tsl::io::JoinPath(params_.StreamDirectory(), "ERROR");
  return AtomicallyWriteStringToFile(error_file, status.ToString(),
                                     params_.env);
}
// Returns the current outcome: false while running, true on success, or
// the recorded error status.
absl::StatusOr<bool> SnapshotStreamWriter::Completed() const
    TF_LOCKS_EXCLUDED(mu_) {
  mutex_lock l(mu_);
  return completed_;
}
// Blocks until the writer thread exits (resetting the Thread joins it),
// then returns the final outcome.
absl::StatusOr<bool> SnapshotStreamWriter::Wait() TF_LOCKS_EXCLUDED(mu_) {
  snapshot_thread_.reset();
  mutex_lock l(mu_);
  return completed_;
}
// Records a cancellation; the writer thread observes it at the next
// ShouldWriteChunks()/ShouldWriteRecord()/Completed() check.
void SnapshotStreamWriter::Cancel() TF_LOCKS_EXCLUDED(mu_) {
  mutex_lock l(mu_);
  completed_ = absl::CancelledError(
      "The tf.data service snapshot writer has been cancelled.");
}
// Atomically writes an iterator checkpoint named after the index of the
// chunk that would follow this batch, then prunes older checkpoints.
absl::Status SnapshotStreamWriter::Save(
    const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
  const size_t num_elements = TotalNumElements(file_stats);
  const ByteSize byte_size = TotalBytes(file_stats);
  LOG(INFO) << "Checkpointing distributed tf.data snapshot writer for snapshot "
            << params_.DebugString() << ". Stream " << params_.stream_index
            << ", chunk " << chunk_index_
            << ", number of elements in chunk: " << num_elements
            << ", chunk size: " << byte_size << ".";
  tsl::profiler::TraceMe activity("SnapshotCheckpoint",
                                  tsl::profiler::TraceMeLevel::kInfo);
  absl::Time start_time = absl::FromUnixMicros(params_.env->NowMicros());
  // The checkpoint index is the chunk index after committing this batch
  // (one committed chunk per file in file_stats).
  int64_t checkpoint_index = chunk_index_ + file_stats.size();
  std::string checkpoint_path = CheckpointPath(checkpoint_index, num_elements);
  TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_iterator,
                      iterator_->Save());
  TF_RETURN_IF_ERROR(AtomicallyWriteTFRecords(
      checkpoint_path, serialized_iterator, params_.compression, params_.env));
  absl::Time end_time = absl::FromUnixMicros(params_.env->NowMicros());
  LOG(INFO) << "Wrote checkpoint file " << checkpoint_path << ". "
            << "Checkpointing distributed tf.data snapshot writer took "
            << (end_time - start_time);
  return DeleteOutdatedCheckpoints(checkpoint_index);
}
// Removes checkpoints older than `checkpoint_index`, plus any leftover
// temporary files, keeping only the newest checkpoint.
absl::Status SnapshotStreamWriter::DeleteOutdatedCheckpoints(
    int64_t checkpoint_index) {
  if (params_.test_only_keep_temp_files) {
    return absl::OkStatus();
  }
  std::vector<std::string> checkpoint_filenames;
  TF_RETURN_IF_ERROR(params_.env->GetChildren(params_.CheckpointsDirectory(),
                                              &checkpoint_filenames));
  for (const std::string& checkpoint_filename : checkpoint_filenames) {
    std::string checkpoint_filepath =
        tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_filename);
    // Temp files are leftovers from interrupted atomic writes.
    if (IsTemporaryFile(checkpoint_filename)) {
      TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
      continue;
    }
    TF_ASSIGN_OR_RETURN(auto checkpoint_filename_tokens,
                        ParseCheckpointFilename(checkpoint_filename));
    auto [checkpoint_file_index, _] = checkpoint_filename_tokens;
    if (checkpoint_file_index < checkpoint_index) {
      TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
    }
  }
  return absl::OkStatus();
}
// Deletes the entire checkpoints directory once the stream is finalized;
// checkpoints are only needed to resume an in-progress stream.
absl::Status SnapshotStreamWriter::DeleteCheckpoints() {
  if (params_.test_only_keep_temp_files) {
    return absl::OkStatus();
  }
  LOG(INFO) << "Deleting tf.data snapshot checkpoints directory: "
            << params_.CheckpointsDirectory();
  if (params_.env->FileExists(params_.CheckpointsDirectory()).ok()) {
    int64_t undeleted_files, undeleted_dirs;
    return params_.env->DeleteRecursively(params_.CheckpointsDirectory(),
                                          &undeleted_files, &undeleted_dirs);
  }
  return absl::OkStatus();
}
// Resumes the stream from the most recent checkpoint: restores the
// iterator state and reconciles chunk files against the checkpoint.  With
// no checkpoint, only stale uncommitted chunks are cleaned up.
absl::Status SnapshotStreamWriter::Restore() {
  absl::StatusOr<std::string> checkpoint_name = LastCheckpointName();
  if (absl::IsNotFound(checkpoint_name.status())) {
    // Fresh stream: discard any uncommitted chunk files.
    return SyncCheckpointWithChunks(std::nullopt,
                                    kUnknownNumElements);
  }
  TF_RETURN_IF_ERROR(checkpoint_name.status());
  snapshot_util::TFRecordReaderImpl reader(
      CheckpointPath(*checkpoint_name), params_.compression,
      kTFRecordReaderOutputBufferSize.ToUnsignedBytes());
  TF_RETURN_IF_ERROR(reader.Initialize(params_.env));
  TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_tensors,
                      reader.GetTensors());
  TF_RETURN_IF_ERROR(iterator_->Restore(serialized_tensors));
  TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
                      ParseCheckpointFilename(*checkpoint_name));
  auto [checkpoint_index, checkpoint_num_elements] = checkpoint_name_tokens;
  TF_RETURN_IF_ERROR(
      SyncCheckpointWithChunks(checkpoint_index, checkpoint_num_elements));
  // Resume writing at the chunk the checkpoint corresponds to.
  chunk_index_ = checkpoint_index;
  LOG(INFO) << "Restored distributed tf.data snapshot writer. Snapshot "
            << params_.snapshot_path << ", stream " << params_.stream_index
            << ", chunk " << checkpoint_index << ".";
  return absl::OkStatus();
}
// Returns the checkpoint file with the highest chunk index, or NotFound if
// the checkpoints directory is empty.
absl::StatusOr<std::string> SnapshotStreamWriter::LastCheckpointName() const {
  TF_ASSIGN_OR_RETURN(std::vector<std::string> checkpoint_names,
                      GetChildren(params_.CheckpointsDirectory(), params_.env));
  if (checkpoint_names.empty()) {
    return absl::NotFoundError(
        absl::StrCat("No checkpoint has been written in directory ",
                     params_.CheckpointsDirectory()));
  }
  // Linear scan for the largest index encoded in the filenames.
  int64_t last_index = -1;
  std::string last_checkpoint_name = "";
  for (const std::string& checkpoint_name : checkpoint_names) {
    TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
                        ParseCheckpointFilename(checkpoint_name));
    auto [checkpoint_index, unused] = checkpoint_name_tokens;
    if (checkpoint_index > last_index) {
      last_index = checkpoint_index;
      last_checkpoint_name = checkpoint_name;
    }
  }
  return last_checkpoint_name;
}
// Reconciles uncommitted chunk files with the restored checkpoint: chunks
// the checkpoint already covers (index < checkpoint_index) are committed;
// chunks written after the checkpoint are deleted, since their elements
// will be regenerated by the restored iterator.
absl::Status SnapshotStreamWriter::SyncCheckpointWithChunks(
    std::optional<int64_t> checkpoint_index, int64_t checkpoint_num_elements) {
  TF_ASSIGN_OR_RETURN(
      std::vector<std::string> uncommitted_chunks,
      GetChildren(params_.UncommittedChunksDirectory(), params_.env));
  TF_ASSIGN_OR_RETURN(int64_t last_committed_chunk_index,
                      LastCommittedChunkIndex());
  int64_t next_chunk_index = last_committed_chunk_index + 1;
  for (const std::string& uncommitted_chunk : uncommitted_chunks) {
    std::string uncommitted_chunk_filename = tsl::io::JoinPath(
        params_.UncommittedChunksDirectory(), uncommitted_chunk);
    TF_ASSIGN_OR_RETURN(int64_t uncommitted_chunk_index,
                        GetUncommittedChunkIndex(uncommitted_chunk));
    if (checkpoint_index.has_value() &&
        uncommitted_chunk_index < *checkpoint_index) {
      // The element count is only known for the chunk right before the
      // checkpoint (it is encoded in the checkpoint filename).
      int64_t chunk_num_elements = (next_chunk_index == *checkpoint_index - 1)
                                       ? checkpoint_num_elements
                                       : kUnknownNumElements;
      std::string committed_chunk_filename = tsl::io::JoinPath(
          params_.CommittedChunksDirectory(),
          absl::StrCat("chunk_", params_.stream_index, "_", next_chunk_index,
                       "_", chunk_num_elements));
      TF_RETURN_IF_ERROR(params_.env->RenameFile(uncommitted_chunk_filename,
                                                 committed_chunk_filename));
      ++next_chunk_index;
    } else {
      // Written after the checkpoint (or no checkpoint): discard.
      TF_RETURN_IF_ERROR(params_.env->DeleteFile(uncommitted_chunk_filename));
    }
  }
  // Every chunk index up to the checkpoint must now be committed.
  if (checkpoint_index.has_value() && next_chunk_index != *checkpoint_index) {
    return absl::InternalError(absl::StrCat(
        "Failed to recover tf.data snapshot writer: Unable to find chunks [",
        next_chunk_index, ", ", *checkpoint_index, ")."));
  }
  return absl::OkStatus();
}
// Scans the committed-chunks directory and returns the highest chunk index
// committed for this stream, or -1 when this stream has committed nothing.
// Fix: the previously computed `committed_chunks_directory` local was
// unused — the directory was recomputed in the GetChildren() call.
absl::StatusOr<int64_t> SnapshotStreamWriter::LastCommittedChunkIndex() {
  const std::string committed_chunks_directory =
      params_.CommittedChunksDirectory();
  TF_ASSIGN_OR_RETURN(std::vector<std::string> committed_chunks,
                      GetChildren(committed_chunks_directory, params_.env));
  int64_t last_committed_chunk_index = -1;
  for (const std::string& committed_chunk : committed_chunks) {
    // Chunk filenames encode <stream_index>_<chunk_index>_<num_elements>.
    TF_ASSIGN_OR_RETURN(auto chunk_filename_tokens,
                        ParseChunkFilename(committed_chunk));
    const auto [stream_index, chunk_index, _] = chunk_filename_tokens;
    // The committed directory is shared by all streams of this snapshot.
    if (stream_index != params_.stream_index) {
      continue;
    }
    if (chunk_index > last_committed_chunk_index) {
      last_committed_chunk_index = chunk_index;
    }
  }
  return last_committed_chunk_index;
}
// Builds the checkpoint path encoding the chunk index and element count:
// <checkpoints_dir>/checkpoint_<chunk_index>_<chunk_num_elements>.
std::string SnapshotStreamWriter::CheckpointPath(
    int64_t chunk_index, int64_t chunk_num_elements) const {
  const std::string filename =
      absl::StrCat("checkpoint_", chunk_index, "_", chunk_num_elements);
  return tsl::io::JoinPath(params_.CheckpointsDirectory(), filename);
}
// Resolves an existing checkpoint file name against the checkpoints
// directory.
std::string SnapshotStreamWriter::CheckpointPath(
    const std::string& checkpoint_name) const {
  return tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_name);
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/test_utils.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/lib/monitoring/cell_reader.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::testing::ValuesIn;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
// Builds a standalone dataset from `dataset_def` and wraps its iterator in a
// StandaloneTaskIterator so tests can drive it through the TaskIterator API.
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>> TestIterator(
    const DatasetDef& dataset_def) {
  std::unique_ptr<standalone::Dataset> dataset;
  TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
      standalone::Dataset::Params(), dataset_def.graph(), &dataset));
  std::unique_ptr<standalone::Iterator> iterator;
  TF_RETURN_IF_ERROR(dataset->MakeIterator(&iterator));
  // The iterator keeps the dataset alive by owning it alongside.
  return std::make_unique<StandaloneTaskIterator>(std::move(dataset),
                                                  std::move(iterator));
}
// A TaskIterator that replays a fixed sequence of values and/or errors.
// Each OK entry is emitted as a single scalar Tensor; each error entry is
// surfaced as the status returned by GetNext. Used to exercise the snapshot
// writer's error-handling path.
template <class T>
class ElementOrErrorIterator : public TaskIterator {
 public:
  explicit ElementOrErrorIterator(
      const std::vector<absl::StatusOr<T>>& elements)
      : elements_(elements) {}
  // Emits the next entry, or its stored error. Sets `end_of_sequence` once
  // all entries have been consumed.
  absl::Status GetNext(std::vector<Tensor>& element,
                       bool& end_of_sequence) override {
    end_of_sequence = (next_ >= elements_.size());
    if (end_of_sequence) {
      return absl::OkStatus();
    }
    const absl::StatusOr<T>& next_element = elements_[next_++];
    TF_RETURN_IF_ERROR(next_element.status());
    element = {Tensor{*next_element}};
    return absl::OkStatus();
  }
  // Checkpointing is a no-op for this in-memory iterator.
  absl::StatusOr<std::vector<Tensor>> Save() override {
    return std::vector<Tensor>{};
  }
  absl::Status Restore(const std::vector<Tensor>& saved_iterator) override {
    return absl::OkStatus();
  }
  int64_t Cardinality() const override { return elements_.size(); }
 private:
  const std::vector<absl::StatusOr<T>> elements_;
  int64_t next_ = 0;  // Index of the next entry to emit.
};
// Creates a fresh local directory tree for a test snapshot, including the
// committed-chunks subdirectory, and returns the snapshot base path.
absl::StatusOr<std::string> CreateSnapshotDirectory() {
  std::string base_path;
  Env* const env = Env::Default();
  if (!env->LocalTempFilename(&base_path)) {
    return absl::FailedPreconditionError(
        "Failed to create local temp file for snapshot.");
  }
  TF_RETURN_IF_ERROR(
      env->RecursivelyCreateDir(CommittedChunksDirectory(base_path)));
  return base_path;
}
// Opens a snapshot_util::Reader over `snapshot_path`, expecting
// `num_elements` int64 tensors written with `compression`.
absl::StatusOr<std::unique_ptr<snapshot_util::Reader>> CreateSnapshotReader(
    const std::string& snapshot_path, int64_t num_elements,
    const std::string& compression, Env* env) {
  // NOTE(review): 2 presumably selects snapshot_util's TFRecord reader
  // version — confirm against snapshot_utils.h.
  static constexpr int kTFRecordReader = 2;
  DataTypeVector dtypes(num_elements, DT_INT64);
  std::unique_ptr<snapshot_util::Reader> reader;
  TF_RETURN_IF_ERROR(snapshot_util::Reader::Create(
      env, snapshot_path, compression, kTFRecordReader, dtypes, &reader));
  return reader;
}
// Reads `num_elements` tensors from `snapshot_path` and returns the first
// flattened value of each tensor as a T.
template <class T>
absl::StatusOr<std::vector<T>> ReadSnapshot(const std::string& snapshot_path,
                                            const std::string& compression,
                                            int64_t num_elements) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<snapshot_util::Reader> reader,
                      CreateSnapshotReader(snapshot_path, num_elements,
                                           compression, Env::Default()));
  std::vector<Tensor> tensors;
  TF_RETURN_IF_ERROR(reader->ReadTensors(&tensors));
  std::vector<T> result;
  for (const Tensor& tensor : tensors) {
    // Only the first element of each tensor is kept; the test datasets
    // produce scalar tensors.
    result.push_back(tensor.unaligned_flat<T>().data()[0]);
  }
  return result;
}
// Reads the entire contents of `filename` into a string.
absl::StatusOr<std::string> ReadStringFromFile(const std::string& filename) {
  std::string contents;
  TF_RETURN_IF_ERROR(ReadFileToString(Env::Default(), filename, &contents));
  return contents;
}
// Fixture parameterized on the compression codec name (see the
// INSTANTIATE_TEST_SUITE_P at the bottom of this file).
class SnapshotStreamWriterParameterizedTest
    : public ::testing::TestWithParam<std::string> {
 public:
  std::string Compression() const { return GetParam(); }
};
// Writes a 10-element range dataset, then verifies the elements round-trip,
// uncommitted chunks are cleaned up, and bytes-committed metrics are bumped.
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshot) {
  CellReader<int64_t> cell_reader(
      "/tensorflow/data/service/snapshot_bytes_committed");
  EXPECT_EQ(cell_reader.Delta(), 0);
  int64_t range = 10;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(range)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     Compression(), Env::Default()};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
  EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
              IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
  // All chunks should have been moved out of the uncommitted directory.
  EXPECT_THAT(
      GetChildren(writer_params.UncommittedChunksDirectory(), Env::Default()),
      IsOkAndHolds(IsEmpty()));
  // 10 int64 elements => at least 80 bytes committed.
  EXPECT_GE(cell_reader.Delta(), 80);
}
// Writes a stream to completion, then starts a second writer over the same
// stream directory and verifies it also reports success without corrupting
// the already-written data.
TEST_P(SnapshotStreamWriterParameterizedTest, StreamAlreadyCompleted) {
  int64_t range = 10;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(range)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     Compression(), Env::Default()};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
  EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
              IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
  TF_ASSERT_OK_AND_ASSIGN(iterator, TestIterator(testing::RangeDataset(range)));
  SnapshotStreamWriter duplicate_writer(writer_params, std::move(iterator));
  // Bug fix: wait on `duplicate_writer` — the original waited on
  // `snapshot_writer` again, so the duplicate-stream path was never exercised.
  EXPECT_THAT(duplicate_writer.Wait(), IsOkAndHolds(true));
  EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
              IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
// With a 1-byte max chunk size, every element lands in its own chunk; verify
// one committed chunk per element and a correct round-trip.
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshotChunks) {
  int64_t range = 10;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(range)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     Compression(), Env::Default(),
                                     ByteSize::Bytes(1)};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
  EXPECT_THAT(
      GetChildren(writer_params.CommittedChunksDirectory(), Env::Default()),
      IsOkAndHolds(SizeIs(range)));
  EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
              IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
// A successful stream should end with a DONE marker file and no ERROR file,
// and Completed() should report true.
TEST_P(SnapshotStreamWriterParameterizedTest, WriteDoneFile) {
  int64_t range = 10;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(range)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  std::string done_file_path = tsl::io::JoinPath(
      StreamDirectory(snapshot_path, 0), "DONE");
  std::string error_file_path = tsl::io::JoinPath(
      StreamDirectory(snapshot_path, 0), "ERROR");
  // Neither marker exists before the writer runs.
  EXPECT_THAT(Env::Default()->FileExists(done_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  EXPECT_THAT(Env::Default()->FileExists(error_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     Compression(), Env::Default(),
                                     ByteSize::Bytes(1)};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
  TF_EXPECT_OK(Env::Default()->FileExists(done_file_path));
  EXPECT_THAT(Env::Default()->FileExists(error_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  EXPECT_THAT(snapshot_writer.Completed(), IsOkAndHolds(true));
}
// When the iterator yields an error, the writer should fail with that error,
// write an ERROR marker file containing the message, and never write DONE.
TEST_P(SnapshotStreamWriterParameterizedTest, WriteErrorFile) {
  // First entry succeeds; the second entry is the InvalidArgument error the
  // writer is expected to propagate (the later entries are never reached).
  auto error_iterator = std::make_unique<ElementOrErrorIterator<tstring>>(
      std::vector<absl::StatusOr<tstring>>{
          tstring("First element"),
          absl::InvalidArgumentError("Invalid argument"),
          tstring("Second element"), absl::AbortedError("Aborted")});
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  std::string done_file_path = tsl::io::JoinPath(
      StreamDirectory(snapshot_path, 0), "DONE");
  std::string error_file_path = tsl::io::JoinPath(
      StreamDirectory(snapshot_path, 0), "ERROR");
  EXPECT_THAT(Env::Default()->FileExists(done_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  EXPECT_THAT(Env::Default()->FileExists(error_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     Compression(), Env::Default(),
                                     ByteSize::Bytes(1)};
  SnapshotStreamWriter snapshot_writer(writer_params,
                                       std::move(error_iterator));
  EXPECT_THAT(snapshot_writer.Wait(),
              StatusIs(absl::StatusCode::kInvalidArgument));
  EXPECT_THAT(Env::Default()->FileExists(done_file_path),
              StatusIs(absl::StatusCode::kNotFound));
  TF_EXPECT_OK(Env::Default()->FileExists(error_file_path));
  EXPECT_THAT(ReadStringFromFile(error_file_path),
              IsOkAndHolds(HasSubstr("Invalid argument")));
  EXPECT_THAT(snapshot_writer.Completed(),
              StatusIs(absl::StatusCode::kInvalidArgument));
}
// Runs every parameterized test above once per supported compression codec.
INSTANTIATE_TEST_SUITE_P(Compression, SnapshotStreamWriterParameterizedTest,
                         ValuesIn<std::string>({tsl::io::compression::kNone,
                                                tsl::io::compression::kGzip,
                                                tsl::io::compression::kSnappy,
                                                tsl::io::compression::kZlib}));
// An empty dataset should still complete successfully and produce an empty
// snapshot.
TEST(SnapshotStreamWriterTest, EmptyDataset) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(0)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     tsl::io::compression::kSnappy,
                                     Env::Default()};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
  EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path,
                                             tsl::io::compression::kSnappy),
              IsOkAndHolds(IsEmpty()));
}
// Cancelling a writer makes Wait() return kCancelled. The large range keeps
// the writer busy long enough for the cancellation to land mid-write.
TEST(SnapshotStreamWriterTest, Cancel) {
  const int64_t range = 10000;
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
                          TestIterator(testing::RangeDataset(range)));
  TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
  SnapshotWriterParams writer_params{snapshot_path, 0,
                                     tsl::io::compression::kSnappy,
                                     Env::Default()};
  SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
  snapshot_writer.Cancel();
  EXPECT_THAT(snapshot_writer.Wait(), StatusIs(absl::StatusCode::kCancelled));
}
}
}
} |
94 | #ifndef TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
#define TENSORFLOW_LITE_CORE_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi();
#ifdef __cplusplus
}
#endif
#endif
#include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/nnapi_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
extern "C" {
// Creates a StatefulNnApiDelegate from the serialized TFLiteSettings.
// If the settings carry an NNAPI support-library handle, NNAPI is driven
// through that library instead of the platform implementation. The caller
// owns the returned delegate and must release it via DestroyDelegate.
static TfLiteDelegate* CreateDelegate(const void* settings) {
  const auto* tflite_settings =
      static_cast<const ::tflite::TFLiteSettings*>(settings);
  tflite::delegates::NnapiPlugin plugin(*tflite_settings);
  auto sl_handle = plugin.GetSupportLibraryHandle();
  if (!sl_handle) {
    return new tflite::StatefulNnApiDelegate(plugin.Options());
  }
  const auto* sl_driver =
      reinterpret_cast<const NnApiSLDriverImplFL5*>(sl_handle);
  return new tflite::StatefulNnApiDelegate(sl_driver, plugin.Options());
}
// Destroys a delegate created by CreateDelegate. The cast to the concrete
// type is required so the StatefulNnApiDelegate destructor runs.
static void DestroyDelegate(TfLiteDelegate* delegate) {
  delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
}
// Returns the last NNAPI error code recorded by the delegate. The plugin API
// passes the generic TfLiteDelegate*; the errno lives on the concrete
// StatefulNnApiDelegate, hence the downcast.
static int DelegateErrno(TfLiteDelegate* from_delegate) {
  return static_cast<tflite::StatefulNnApiDelegate*>(from_delegate)
      ->GetNnApiErrno();
}
// The C plugin vtable: create / destroy / errno entry points defined above.
static constexpr TfLiteDelegatePlugin kPluginCApi{
    CreateDelegate,
    DestroyDelegate,
    DelegateErrno,
};
// Returns a pointer to the statically allocated plugin vtable; never null.
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi() {
  return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/nnapi_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
// Fixture that builds a minimal TFLiteSettings flatbuffer containing empty
// NNAPISettings; `settings_` points into `flatbuffer_builder_`'s buffer and
// is valid for the lifetime of the fixture.
class NnapiTest : public testing::Test {
 public:
  void SetUp() override {
    NNAPISettingsBuilder nnapi_settings_builder(flatbuffer_builder_);
    flatbuffers::Offset<NNAPISettings> nnapi_settings =
        nnapi_settings_builder.Finish();
    TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
    tflite_settings_builder.add_nnapi_settings(nnapi_settings);
    flatbuffers::Offset<TFLiteSettings> tflite_settings =
        tflite_settings_builder.Finish();
    flatbuffer_builder_.Finish(tflite_settings);
    settings_ = flatbuffers::GetRoot<TFLiteSettings>(
        flatbuffer_builder_.GetBufferPointer());
  }
  ~NnapiTest() override {}
 protected:
  flatbuffers::FlatBufferBuilder flatbuffer_builder_;
  const TFLiteSettings *settings_;  // Non-owning; backed by the builder.
};
// The plugin's create() should return a non-null delegate that destroy()
// can release without crashing.
TEST_F(NnapiTest, CanCreateAndDestroyDelegate) {
  TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
  EXPECT_NE(delegate, nullptr);
  TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
// A freshly created delegate should report errno 0 (no NNAPI error yet).
TEST_F(NnapiTest, CanGetDelegateErrno) {
  TfLiteDelegate *delegate = TfLiteNnapiDelegatePluginCApi()->create(settings_);
  int error_number =
      TfLiteNnapiDelegatePluginCApi()->get_delegate_errno(delegate);
  EXPECT_EQ(error_number, 0);
  TfLiteNnapiDelegatePluginCApi()->destroy(delegate);
}
} |
95 | #ifndef THIRD_PARTY_CEL_CPP_PARSER_MACRO_EXPR_FACTORY_H_
#define THIRD_PARTY_CEL_CPP_PARSER_MACRO_EXPR_FACTORY_H_
#include <algorithm>
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/strings/string_view.h"
#include "common/expr.h"
#include "common/expr_factory.h"
namespace cel {
class ParserMacroExprFactory;
class TestMacroExprFactory;
// Expression factory used while expanding CEL macros.
//
// It layers two conveniences over ExprFactory: (1) id-less overloads of the
// New* constructors that allocate ids via the virtual NextId(), and (2) deep
// Copy() helpers that re-id existing subtrees via CopyId(). Concrete
// subclasses (the parser and test factories, which are friends below)
// provide the id scheme and the error-reporting behavior.
class MacroExprFactory : protected ExprFactory {
 protected:
  using ExprFactory::IsArrayLike;
  using ExprFactory::IsExprLike;
  using ExprFactory::IsStringLike;
  // True when T is exactly U or U&&; used to constrain the variadic
  // overloads below to rvalue element types.
  template <typename T, typename U>
  struct IsRValue
      : std::bool_constant<
            std::disjunction_v<std::is_same<T, U>, std::is_same<T, U&&>>> {};
 public:
  // Deep copies that assign fresh ids through CopyId().
  ABSL_MUST_USE_RESULT Expr Copy(const Expr& expr);
  ABSL_MUST_USE_RESULT ListExprElement Copy(const ListExprElement& element);
  ABSL_MUST_USE_RESULT StructExprField Copy(const StructExprField& field);
  ABSL_MUST_USE_RESULT MapExprEntry Copy(const MapExprEntry& entry);
  // Constant and identifier constructors; each draws its id from NextId().
  ABSL_MUST_USE_RESULT Expr NewUnspecified() {
    return NewUnspecified(NextId());
  }
  ABSL_MUST_USE_RESULT Expr NewNullConst() { return NewNullConst(NextId()); }
  ABSL_MUST_USE_RESULT Expr NewBoolConst(bool value) {
    return NewBoolConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewIntConst(int64_t value) {
    return NewIntConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewUintConst(uint64_t value) {
    return NewUintConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewDoubleConst(double value) {
    return NewDoubleConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewBytesConst(std::string value) {
    return NewBytesConst(NextId(), std::move(value));
  }
  ABSL_MUST_USE_RESULT Expr NewBytesConst(absl::string_view value) {
    return NewBytesConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewBytesConst(absl::Nullable<const char*> value) {
    return NewBytesConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewStringConst(std::string value) {
    return NewStringConst(NextId(), std::move(value));
  }
  ABSL_MUST_USE_RESULT Expr NewStringConst(absl::string_view value) {
    return NewStringConst(NextId(), value);
  }
  ABSL_MUST_USE_RESULT Expr NewStringConst(absl::Nullable<const char*> value) {
    return NewStringConst(NextId(), value);
  }
  template <typename Name,
            typename = std::enable_if_t<IsStringLike<Name>::value>>
  ABSL_MUST_USE_RESULT Expr NewIdent(Name name) {
    return NewIdent(NextId(), std::move(name));
  }
  ABSL_MUST_USE_RESULT Expr NewAccuIdent() { return NewAccuIdent(NextId()); }
  // Field selection and presence-test (`has(...)`) constructors.
  template <typename Operand, typename Field,
            typename = std::enable_if_t<IsExprLike<Operand>::value>,
            typename = std::enable_if_t<IsStringLike<Field>::value>>
  ABSL_MUST_USE_RESULT Expr NewSelect(Operand operand, Field field) {
    return NewSelect(NextId(), std::move(operand), std::move(field));
  }
  template <typename Operand, typename Field,
            typename = std::enable_if_t<IsExprLike<Operand>::value>,
            typename = std::enable_if_t<IsStringLike<Field>::value>>
  ABSL_MUST_USE_RESULT Expr NewPresenceTest(Operand operand, Field field) {
    return NewPresenceTest(NextId(), std::move(operand), std::move(field));
  }
  // Call constructors: variadic rvalue arguments or an array-like of Expr.
  template <
      typename Function, typename... Args,
      typename = std::enable_if_t<IsStringLike<Function>::value>,
      typename = std::enable_if_t<std::conjunction_v<IsRValue<Expr, Args>...>>>
  ABSL_MUST_USE_RESULT Expr NewCall(Function function, Args&&... args) {
    std::vector<Expr> array;
    array.reserve(sizeof...(Args));
    (array.push_back(std::forward<Args>(args)), ...);
    return NewCall(NextId(), std::move(function), std::move(array));
  }
  template <typename Function, typename Args,
            typename = std::enable_if_t<IsStringLike<Function>::value>,
            typename = std::enable_if_t<IsArrayLike<Expr, Args>::value>>
  ABSL_MUST_USE_RESULT Expr NewCall(Function function, Args args) {
    return NewCall(NextId(), std::move(function), std::move(args));
  }
  template <
      typename Function, typename Target, typename... Args,
      typename = std::enable_if_t<IsStringLike<Function>::value>,
      typename = std::enable_if_t<IsExprLike<Target>::value>,
      typename = std::enable_if_t<std::conjunction_v<IsRValue<Expr, Args>...>>>
  ABSL_MUST_USE_RESULT Expr NewMemberCall(Function function, Target target,
                                          Args&&... args) {
    std::vector<Expr> array;
    array.reserve(sizeof...(Args));
    (array.push_back(std::forward<Args>(args)), ...);
    return NewMemberCall(NextId(), std::move(function), std::move(target),
                         std::move(array));
  }
  template <typename Function, typename Target, typename Args,
            typename = std::enable_if_t<IsStringLike<Function>::value>,
            typename = std::enable_if_t<IsExprLike<Target>::value>,
            typename = std::enable_if_t<IsArrayLike<Expr, Args>::value>>
  ABSL_MUST_USE_RESULT Expr NewMemberCall(Function function, Target target,
                                          Args args) {
    return NewMemberCall(NextId(), std::move(function), std::move(target),
                         std::move(args));
  }
  using ExprFactory::NewListElement;
  // List, struct, and map constructors follow the same two-overload pattern.
  template <typename... Elements,
            typename = std::enable_if_t<
                std::conjunction_v<IsRValue<ListExprElement, Elements>...>>>
  ABSL_MUST_USE_RESULT Expr NewList(Elements&&... elements) {
    std::vector<ListExprElement> array;
    array.reserve(sizeof...(Elements));
    (array.push_back(std::forward<Elements>(elements)), ...);
    return NewList(NextId(), std::move(array));
  }
  template <typename Elements,
            typename =
                std::enable_if_t<IsArrayLike<ListExprElement, Elements>::value>>
  ABSL_MUST_USE_RESULT Expr NewList(Elements elements) {
    return NewList(NextId(), std::move(elements));
  }
  template <typename Name, typename Value,
            typename = std::enable_if_t<IsStringLike<Name>::value>,
            typename = std::enable_if_t<IsExprLike<Value>::value>>
  ABSL_MUST_USE_RESULT StructExprField NewStructField(Name name, Value value,
                                                      bool optional = false) {
    return NewStructField(NextId(), std::move(name), std::move(value),
                          optional);
  }
  template <typename Name, typename... Fields,
            typename = std::enable_if_t<IsStringLike<Name>::value>,
            typename = std::enable_if_t<
                std::conjunction_v<IsRValue<StructExprField, Fields>...>>>
  ABSL_MUST_USE_RESULT Expr NewStruct(Name name, Fields&&... fields) {
    std::vector<StructExprField> array;
    array.reserve(sizeof...(Fields));
    (array.push_back(std::forward<Fields>(fields)), ...);
    return NewStruct(NextId(), std::move(name), std::move(array));
  }
  template <
      typename Name, typename Fields,
      typename = std::enable_if_t<IsStringLike<Name>::value>,
      typename = std::enable_if_t<IsArrayLike<StructExprField, Fields>::value>>
  ABSL_MUST_USE_RESULT Expr NewStruct(Name name, Fields fields) {
    return NewStruct(NextId(), std::move(name), std::move(fields));
  }
  template <typename Key, typename Value,
            typename = std::enable_if_t<IsExprLike<Key>::value>,
            typename = std::enable_if_t<IsExprLike<Value>::value>>
  ABSL_MUST_USE_RESULT MapExprEntry NewMapEntry(Key key, Value value,
                                                bool optional = false) {
    return NewMapEntry(NextId(), std::move(key), std::move(value), optional);
  }
  template <typename... Entries, typename = std::enable_if_t<std::conjunction_v<
                                     IsRValue<MapExprEntry, Entries>...>>>
  ABSL_MUST_USE_RESULT Expr NewMap(Entries&&... entries) {
    std::vector<MapExprEntry> array;
    array.reserve(sizeof...(Entries));
    (array.push_back(std::forward<Entries>(entries)), ...);
    return NewMap(NextId(), std::move(array));
  }
  template <typename Entries, typename = std::enable_if_t<
                                  IsArrayLike<MapExprEntry, Entries>::value>>
  ABSL_MUST_USE_RESULT Expr NewMap(Entries entries) {
    return NewMap(NextId(), std::move(entries));
  }
  // Comprehension constructor (the macro-expansion form of map/filter/etc.).
  template <typename IterVar, typename IterRange, typename AccuVar,
            typename AccuInit, typename LoopCondition, typename LoopStep,
            typename Result,
            typename = std::enable_if_t<IsStringLike<IterVar>::value>,
            typename = std::enable_if_t<IsExprLike<IterRange>::value>,
            typename = std::enable_if_t<IsStringLike<AccuVar>::value>,
            typename = std::enable_if_t<IsExprLike<AccuInit>::value>,
            typename = std::enable_if_t<IsExprLike<LoopStep>::value>,
            typename = std::enable_if_t<IsExprLike<LoopCondition>::value>,
            typename = std::enable_if_t<IsExprLike<Result>::value>>
  ABSL_MUST_USE_RESULT Expr
  NewComprehension(IterVar iter_var, IterRange iter_range, AccuVar accu_var,
                   AccuInit accu_init, LoopCondition loop_condition,
                   LoopStep loop_step, Result result) {
    return NewComprehension(NextId(), std::move(iter_var),
                            std::move(iter_range), std::move(accu_var),
                            std::move(accu_init), std::move(loop_condition),
                            std::move(loop_step), std::move(result));
  }
  // Error reporting hooks implemented by subclasses; both return an Expr so
  // macro expansion can continue after reporting.
  ABSL_MUST_USE_RESULT virtual Expr ReportError(absl::string_view message) = 0;
  ABSL_MUST_USE_RESULT virtual Expr ReportErrorAt(
      const Expr& expr, absl::string_view message) = 0;
 protected:
  using ExprFactory::NewAccuIdent;
  using ExprFactory::NewBoolConst;
  using ExprFactory::NewBytesConst;
  using ExprFactory::NewCall;
  using ExprFactory::NewComprehension;
  using ExprFactory::NewConst;
  using ExprFactory::NewDoubleConst;
  using ExprFactory::NewIdent;
  using ExprFactory::NewIntConst;
  using ExprFactory::NewList;
  using ExprFactory::NewMap;
  using ExprFactory::NewMapEntry;
  using ExprFactory::NewMemberCall;
  using ExprFactory::NewNullConst;
  using ExprFactory::NewPresenceTest;
  using ExprFactory::NewSelect;
  using ExprFactory::NewStringConst;
  using ExprFactory::NewStruct;
  using ExprFactory::NewStructField;
  using ExprFactory::NewUintConst;
  using ExprFactory::NewUnspecified;
  // Id allocation hooks implemented by subclasses.
  ABSL_MUST_USE_RESULT virtual ExprId NextId() = 0;
  ABSL_MUST_USE_RESULT virtual ExprId CopyId(ExprId id) = 0;
  ABSL_MUST_USE_RESULT ExprId CopyId(const Expr& expr) {
    return CopyId(expr.id());
  }
 private:
  friend class ParserMacroExprFactory;
  friend class TestMacroExprFactory;
  MacroExprFactory() : ExprFactory() {}
};
}
#endif
#include "parser/macro_expr_factory.h"
#include <utility>
#include <vector>
#include "absl/functional/overload.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/constant.h"
#include "common/expr.h"
namespace cel {
// Deep-copies `expr`, dispatching on its kind. Every node gets a fresh id
// from CopyId(); ids are allocated in pre-order (parent before children),
// which the unit tests rely on.
Expr MacroExprFactory::Copy(const Expr& expr) {
  return absl::visit(
      absl::Overload(
          [this, &expr](const UnspecifiedExpr&) -> Expr {
            return NewUnspecified(CopyId(expr));
          },
          [this, &expr](const Constant& const_expr) -> Expr {
            return NewConst(CopyId(expr), const_expr);
          },
          [this, &expr](const IdentExpr& ident_expr) -> Expr {
            return NewIdent(CopyId(expr), ident_expr.name());
          },
          [this, &expr](const SelectExpr& select_expr) -> Expr {
            // Presence tests (`has(x.f)`) and plain selection share the
            // SelectExpr node, distinguished by test_only().
            const auto id = CopyId(expr);
            return select_expr.test_only()
                       ? NewPresenceTest(id, Copy(select_expr.operand()),
                                         select_expr.field())
                       : NewSelect(id, Copy(select_expr.operand()),
                                   select_expr.field());
          },
          [this, &expr](const CallExpr& call_expr) -> Expr {
            // Member calls carry a target; plain calls do not.
            const auto id = CopyId(expr);
            absl::optional<Expr> target;
            if (call_expr.has_target()) {
              target = Copy(call_expr.target());
            }
            std::vector<Expr> args;
            args.reserve(call_expr.args().size());
            for (const auto& arg : call_expr.args()) {
              args.push_back(Copy(arg));
            }
            return target.has_value()
                       ? NewMemberCall(id, call_expr.function(),
                                       std::move(*target), std::move(args))
                       : NewCall(id, call_expr.function(), std::move(args));
          },
          [this, &expr](const ListExpr& list_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<ListExprElement> elements;
            elements.reserve(list_expr.elements().size());
            for (const auto& element : list_expr.elements()) {
              elements.push_back(Copy(element));
            }
            return NewList(id, std::move(elements));
          },
          [this, &expr](const StructExpr& struct_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<StructExprField> fields;
            fields.reserve(struct_expr.fields().size());
            for (const auto& field : struct_expr.fields()) {
              fields.push_back(Copy(field));
            }
            return NewStruct(id, struct_expr.name(), std::move(fields));
          },
          [this, &expr](const MapExpr& map_expr) -> Expr {
            const auto id = CopyId(expr);
            std::vector<MapExprEntry> entries;
            entries.reserve(map_expr.entries().size());
            for (const auto& entry : map_expr.entries()) {
              entries.push_back(Copy(entry));
            }
            return NewMap(id, std::move(entries));
          },
          [this, &expr](const ComprehensionExpr& comprehension_expr) -> Expr {
            // Children are copied in declaration order so id assignment is
            // deterministic.
            const auto id = CopyId(expr);
            auto iter_range = Copy(comprehension_expr.iter_range());
            auto accu_init = Copy(comprehension_expr.accu_init());
            auto loop_condition = Copy(comprehension_expr.loop_condition());
            auto loop_step = Copy(comprehension_expr.loop_step());
            auto result = Copy(comprehension_expr.result());
            return NewComprehension(
                id, comprehension_expr.iter_var(), std::move(iter_range),
                comprehension_expr.accu_var(), std::move(accu_init),
                std::move(loop_condition), std::move(loop_step),
                std::move(result));
          }),
      expr.kind());
}
// Deep-copies a list element. List elements carry no id of their own; only
// the wrapped expression is re-id'd.
ListExprElement MacroExprFactory::Copy(const ListExprElement& element) {
  auto copied_expr = Copy(element.expr());
  return NewListElement(std::move(copied_expr), element.optional());
}
// Deep-copies a struct field; the field id is copied before the value so ids
// are allocated in the same pre-order as the original implementation.
StructExprField MacroExprFactory::Copy(const StructExprField& field) {
  const auto copied_id = CopyId(field.id());
  auto copied_value = Copy(field.value());
  return NewStructField(copied_id, field.name(), std::move(copied_value),
                        field.optional());
}
// Deep-copies a map entry; ids are allocated in source order: entry id, then
// key subtree, then value subtree.
MapExprEntry MacroExprFactory::Copy(const MapExprEntry& entry) {
  const auto copied_id = CopyId(entry.id());
  auto copied_key = Copy(entry.key());
  auto copied_value = Copy(entry.value());
  return NewMapEntry(copied_id, std::move(copied_key), std::move(copied_value),
                     entry.optional());
}
} | #include "parser/macro_expr_factory.h"
#include <cstdint>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/expr.h"
#include "common/expr_factory.h"
#include "internal/testing.h"
namespace cel {
// Concrete MacroExprFactory for tests: ids are allocated sequentially from 1,
// CopyId maps any nonzero id to the next fresh id (0 stays 0), and errors are
// reported as fresh Unspecified exprs.
class TestMacroExprFactory final : public MacroExprFactory {
 public:
  TestMacroExprFactory() : MacroExprFactory() {}
  // Next id that would be handed out (tests use this to predict numbering).
  ExprId id() const { return id_; }
  Expr ReportError(absl::string_view) override {
    return NewUnspecified(NextId());
  }
  Expr ReportErrorAt(const Expr&, absl::string_view) override {
    return NewUnspecified(NextId());
  }
  // Re-export the protected id-taking overloads for direct use in tests.
  using MacroExprFactory::NewBoolConst;
  using MacroExprFactory::NewCall;
  using MacroExprFactory::NewComprehension;
  using MacroExprFactory::NewIdent;
  using MacroExprFactory::NewList;
  using MacroExprFactory::NewListElement;
  using MacroExprFactory::NewMap;
  using MacroExprFactory::NewMapEntry;
  using MacroExprFactory::NewMemberCall;
  using MacroExprFactory::NewSelect;
  using MacroExprFactory::NewStruct;
  using MacroExprFactory::NewStructField;
  using MacroExprFactory::NewUnspecified;
 protected:
  ExprId NextId() override { return id_++; }
  ExprId CopyId(ExprId id) override {
    if (id == 0) {
      return 0;
    }
    return NextId();
  }
 private:
  int64_t id_ = 1;  // Ids start at 1; 0 means "no id".
};
namespace {
// Original takes id 1, so the copy gets id 2.
TEST(MacroExprFactory, CopyUnspecified) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewUnspecified()), factory.NewUnspecified(2));
}
// Copying an ident preserves the name and assigns the next id (2).
TEST(MacroExprFactory, CopyIdent) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewIdent("foo")), factory.NewIdent(2, "foo"));
}
// Copying a constant preserves the value and assigns the next id (2).
TEST(MacroExprFactory, CopyConst) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewBoolConst(true)),
            factory.NewBoolConst(2, true));
}
// Ids 1-2 build the original; the copy gets 3 (select) then 4 (operand),
// confirming pre-order id assignment.
TEST(MacroExprFactory, CopySelect) {
  TestMacroExprFactory factory;
  EXPECT_EQ(factory.Copy(factory.NewSelect(factory.NewIdent("foo"), "bar")),
            factory.NewSelect(3, factory.NewIdent(4, "foo"), "bar"));
}
// Member-call copy order: call id (4), target (5), then each argument (6).
TEST(MacroExprFactory, CopyCall) {
  TestMacroExprFactory factory;
  std::vector<Expr> copied_args;
  copied_args.reserve(1);
  copied_args.push_back(factory.NewIdent(6, "baz"));
  EXPECT_EQ(factory.Copy(factory.NewMemberCall("bar", factory.NewIdent("foo"),
                                               factory.NewIdent("baz"))),
            factory.NewMemberCall(4, "bar", factory.NewIdent(5, "foo"),
                                  absl::MakeSpan(copied_args)));
}
// List copy: list id (3) first, then the element expression (4).
TEST(MacroExprFactory, CopyList) {
  TestMacroExprFactory factory;
  std::vector<ListExprElement> copied_elements;
  copied_elements.reserve(1);
  copied_elements.push_back(factory.NewListElement(factory.NewIdent(4, "foo")));
  EXPECT_EQ(factory.Copy(factory.NewList(
                factory.NewListElement(factory.NewIdent("foo")))),
            factory.NewList(3, absl::MakeSpan(copied_elements)));
}
// Struct copy: struct id (4), then field id (5), then field value (6).
TEST(MacroExprFactory, CopyStruct) {
  TestMacroExprFactory factory;
  std::vector<StructExprField> copied_fields;
  copied_fields.reserve(1);
  copied_fields.push_back(
      factory.NewStructField(5, "bar", factory.NewIdent(6, "baz")));
  EXPECT_EQ(factory.Copy(factory.NewStruct(
                "foo", factory.NewStructField("bar", factory.NewIdent("baz")))),
            factory.NewStruct(4, "foo", absl::MakeSpan(copied_fields)));
}
// Map copy: map id (5), entry id (6), key (7), value (8).
TEST(MacroExprFactory, CopyMap) {
  TestMacroExprFactory factory;
  std::vector<MapExprEntry> copied_entries;
  copied_entries.reserve(1);
  copied_entries.push_back(factory.NewMapEntry(6, factory.NewIdent(7, "bar"),
                                               factory.NewIdent(8, "baz")));
  EXPECT_EQ(factory.Copy(factory.NewMap(factory.NewMapEntry(
                factory.NewIdent("bar"), factory.NewIdent("baz")))),
            factory.NewMap(5, absl::MakeSpan(copied_entries)));
}
// Comprehension copy: comprehension id (7), then children in declaration
// order — iter_range (8), accu_init (9), loop_condition (10), loop_step (11),
// result (12).
TEST(MacroExprFactory, CopyComprehension) {
  TestMacroExprFactory factory;
  EXPECT_EQ(
      factory.Copy(factory.NewComprehension(
          "foo", factory.NewList(), "bar", factory.NewBoolConst(true),
          factory.NewIdent("baz"), factory.NewIdent("foo"),
          factory.NewIdent("bar"))),
      factory.NewComprehension(
          7, "foo", factory.NewList(8, std::vector<ListExprElement>()), "bar",
          factory.NewBoolConst(9, true), factory.NewIdent(10, "baz"),
          factory.NewIdent(11, "foo"), factory.NewIdent(12, "bar")));
}
}
} |
96 | #ifndef TENSORFLOW_TSL_LIB_RANDOM_SIMPLE_PHILOX_H_
#define TENSORFLOW_TSL_LIB_RANDOM_SIMPLE_PHILOX_H_
#include <math.h>
#include <string.h>
#include <algorithm>
#include "tsl/lib/random/philox_random.h"
#include "tsl/lib/random/random_distributions.h"
namespace tsl {
namespace random {
// Convenience wrapper that turns a counter-based PhiloxRandom generator into
// single-sample draws. Not thread-safe: each call mutates `single_`, and the
// wrapped PhiloxRandom must outlive this object.
class SimplePhilox {
 public:
  PHILOX_DEVICE_INLINE
  explicit SimplePhilox(PhiloxRandom* gen) : single_(gen) {}
  // 32 uniformly random bits.
  PHILOX_DEVICE_INLINE uint32 Rand32() { return single_(); }
  // 64 uniformly random bits assembled from two 32-bit draws (low word first).
  PHILOX_DEVICE_INLINE uint64 Rand64() {
    const uint32 lo = single_(), hi = single_();
    return lo | static_cast<uint64>(hi) << 32;
  }
  // Uniform float/double in [0, 1) (see random_distributions.h converters).
  PHILOX_DEVICE_INLINE float RandFloat() { return Uint32ToFloat(single_()); }
  PHILOX_DEVICE_INLINE double RandDouble() {
    const uint32 x0 = single_(), x1 = single_();
    return Uint64ToDouble(x0, x1);
  }
  // Uniform integer in [0, n), free of modulo bias (see ExactUniformInt).
  uint32 Uniform(uint32 n);
  uint64 Uniform64(uint64 n);
  // Returns true with probability 1/n.
  bool OneIn(uint32 n) { return Uniform(n) == 0; }
  // Picks a bit count uniformly in [0, max_log] and returns that many random
  // low-order bits, skewing the distribution toward small values.
  uint32 Skewed(int max_log);
 private:
  SingleSampleAdapter<PhiloxRandom> single_;
};
}
}
#endif
#include "tsl/lib/random/simple_philox.h"
#include "tsl/lib/random/exact_uniform_int.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace random {
// Uniformly random value in [0, n); ExactUniformInt rejects draws that would
// introduce modulo bias.
uint32 SimplePhilox::Uniform(uint32 n) {
  return ExactUniformInt<uint32>(n, [this]() { return Rand32(); });
}
// 64-bit counterpart of Uniform(): uniformly random value in [0, n).
uint64 SimplePhilox::Uniform64(uint64 n) {
  return ExactUniformInt<uint64>(n, [this]() { return Rand64(); });
}
// Returns `shift` random low-order bits where shift is uniform in
// [0, max_log]; smaller results are therefore exponentially more likely.
uint32 SimplePhilox::Skewed(int max_log) {
  CHECK(0 <= max_log && max_log <= 32);
  const int shift = Rand32() % (max_log + 1);
  // Build the mask with unsigned arithmetic: the original `(1 << shift) - 1`
  // shifted a signed int literal into the sign bit for shift == 31, which is
  // at best implementation-fragile. shift == 32 is still special-cased since
  // shifting by the full type width is undefined. The mask values are
  // unchanged.
  const uint32 mask = shift == 32 ? ~static_cast<uint32>(0)
                                  : (static_cast<uint32>(1) << shift) - 1;
  return Rand32() & mask;
}
}
} | #include "tsl/lib/random/simple_philox.h"
#include <set>
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
// RandFloat/RandDouble must stay in the half-open interval [0, 1).
TEST(SimplePhiloxTest, FloatTest) {
  PhiloxRandom philox(7, 7);
  SimplePhilox gen(&philox);
  static const int kIters = 1000000;
  for (int i = 0; i < kIters; ++i) {
    float f = gen.RandFloat();
    EXPECT_LE(0.0f, f);
    EXPECT_GT(1.0f, f);
  }
  for (int i = 0; i < kIters; ++i) {
    double d = gen.RandDouble();
    EXPECT_LE(0.0, d);
    EXPECT_GT(1.0, d);
  }
}
// Checks that two generators seeded differently diverge within kIters draws.
// `names` labels the seed pair under test in the failure message.
static void DifferenceTest(const char *names, SimplePhilox *gen1,
                           SimplePhilox *gen2) {
  static const int kIters = 100;
  bool different = false;
  for (int i = 0; i < kIters; ++i) {
    if (gen1->Rand32() != gen2->Rand32()) {
      different = true;
      break;
    }
  }
  // Include `names` in the message: the parameter was previously unused, so a
  // failure could not identify which seed pair collided.
  CHECK(different) << names << ": different seeds but same output!";
}
// Well-separated seeds must produce diverging streams.
TEST(SimplePhiloxTest, DifferenceTest) {
  PhiloxRandom philox1(1, 1), philox2(17, 17);
  SimplePhilox gen1(&philox1), gen2(&philox2);
  DifferenceTest("SimplePhilox: different seeds", &gen1, &gen2);
}

// Even adjacent seeds must produce diverging streams.
TEST(SimplePhiloxTest, DifferenceTestCloseSeeds) {
  PhiloxRandom philox1(1, 1), philox2(2, 1);
  SimplePhilox gen1(&philox1), gen2(&philox2);
  DifferenceTest("SimplePhilox: close seeds", &gen1, &gen2);
}

// Regression test: streams from close seeds must neither repeat internally
// nor collide with each other within kCount draws.
TEST(SimplePhiloxTest, Regression_CloseSeedsAreDifferent) {
  const int kCount = 1000;
  PhiloxRandom philox1(0, 1), philox2(1, 1);
  SimplePhilox gen1(&philox1), gen2(&philox2);
  std::set<uint32> first;
  std::set<uint32> all;
  for (int i = 0; i < kCount; ++i) {
    uint32 v = gen1.Rand32();
    first.insert(v);
    all.insert(v);
    all.insert(gen2.Rand32());
  }
  // All kCount draws from gen1 are distinct, and none coincide with gen2's.
  EXPECT_EQ(kCount, first.size());
  EXPECT_EQ(2 * kCount, all.size());
}

// Uniform(range) should be close to uniform: the fraction of samples below
// `threshold` should match threshold / range to within 0.5%.
TEST(SimplePhiloxTest, TestUniform) {
  PhiloxRandom philox(17, 17);
  SimplePhilox gen(&philox);
  uint32 range = 3 * (1L << 29);
  uint32 threshold = 1L << 30;
  size_t count = 0;
  static const int kTrials = 100000;
  for (int i = 0; i < kTrials; ++i) {
    uint32 rnd = gen.Uniform(range);
    if (rnd < threshold) {
      ++count;
    }
  }
  EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}

// Same distribution check for the 64-bit variant.
TEST(SimplePhiloxTest, TestUniform64) {
  PhiloxRandom philox(17, 17);
  SimplePhilox gen(&philox);
  uint64 range = 3 * (1LL << 59);
  uint64 threshold = 1LL << 60;
  size_t count = 0;
  static const int kTrials = 100000;
  for (int i = 0; i < kTrials; ++i) {
    uint64 rnd = gen.Uniform64(range);
    if (rnd < threshold) {
      ++count;
    }
  }
  EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}
}
}
} |
97 | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_FUSION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FILTER_FUSION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
// tf.data graph rewrite that fuses adjacent pairs of FilterDataset ops into a
// single FilterDataset whose predicate is the lazy conjunction of the two
// original predicates.
class FilterFusion : public TFDataOptimizerBase {
 public:
  FilterFusion() = default;
  ~FilterFusion() override = default;

  string name() const override { return "filter_fusion"; }  // was `};` — dropped stray semicolon

  bool UsesFunctionLibrary() const override { return false; }

  // This optimizer takes no configuration options.
  Status Init(
      const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
    return absl::OkStatus();
  }

  Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
                                 GraphDef* output,
                                 OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
// Builds the replacement FilterDataset node: it consumes the first filter's
// input, uses `fused_function` as its predicate, and keeps the second
// filter's output shapes/types (the chain's externally visible metadata).
NodeDef MakeFusedFilterNode(const NodeDef& first_filter_node,
                            const NodeDef& second_filter_node,
                            const FunctionDef& fused_function,
                            MutableGraphView* graph) {
  NodeDef fused_node;
  graph_utils::SetUniqueGraphNodeName("fused_filter", graph->graph(),
                                      &fused_node);
  fused_node.set_op("FilterDataset");
  fused_node.add_input(first_filter_node.input(0));
  // Reuse the first filter's predicate attr, retargeted at the fused function.
  auto attr = first_filter_node.attr().at("predicate");
  *attr.mutable_func()->mutable_name() = fused_function.signature().name();
  (*fused_node.mutable_attr())["predicate"] = std::move(attr);
  graph_utils::CopyAttribute("Targuments", first_filter_node, &fused_node);
  for (auto key : {"output_shapes", "output_types"})
    graph_utils::CopyAttribute(key, second_filter_node, &fused_node);
  graph_utils::MaybeSetFusedMetadata(first_filter_node, second_filter_node,
                                     &fused_node);
  return fused_node;
}
}
// Scans a topologically sorted copy of the graph for chains of two
// FilterDataset nodes and fuses each pair into a single FilterDataset whose
// predicate is the lazy conjunction of the two original predicates.
Status FilterFusion::OptimizeAndCollectStats(Cluster* cluster,
                                             const GrapplerItem& item,
                                             GraphDef* output,
                                             OptimizationStats* stats) {
  GraphDef sorted_old_graph = item.graph;
  TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
  *output = sorted_old_graph;
  MutableGraphView graph(output);
  absl::flat_hash_set<string> nodes_to_delete;
  FunctionLibraryDefinition function_library(OpRegistry::Global(),
                                             output->library());
  // Only plain FilterDataset nodes with a single input qualify for fusion.
  auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
    if (node.op() == "FilterDataset" && node.input_size() == 1) return &node;
    return nullptr;
  };
  // Builds the fused predicate function, or returns nullptr when the two
  // predicates cannot be fused (different signatures).
  auto make_fused_function =
      [&](const NodeDef* first_filter_node,
          const NodeDef* second_filter_node) -> FunctionDef* {
    const auto& parent_fun = first_filter_node->attr().at("predicate");
    const FunctionDef* first_func =
        function_library.Find(parent_fun.func().name());
    const auto& fun = second_filter_node->attr().at("predicate");
    const FunctionDef* second_func = function_library.Find(fun.func().name());
    // NOTE(review): Find() can return nullptr for an unknown function name;
    // the dereference below assumes both lookups succeed — confirm upstream
    // graph validation guarantees this.
    if (!fusion_utils::HasSameSignature(first_func->signature(),
                                        second_func->signature())) {
      VLOG(1) << "Can't fuse Filters because they have different signature\n";
      return nullptr;
    }
    return fusion_utils::FuseFunctions(
        *first_func, *second_func, "fused_predicate",
        fusion_utils::SameSignature, fusion_utils::SameInput,
        fusion_utils::LazyConjunctionOutput, fusion_utils::LazyConjunctionNodes,
        output->mutable_library());
  };
  for (const NodeDef& node : sorted_old_graph.node()) {
    // Match a filter whose single input is itself a filter.
    const NodeDef* second_filter_node = get_filter_node(node);
    if (!second_filter_node) continue;
    const NodeDef* first_filter_node =
        get_filter_node(*graph_utils::GetInputNode(*second_filter_node, graph));
    if (!first_filter_node) continue;
    const auto* fused_predicate =
        make_fused_function(first_filter_node, second_filter_node);
    if (!fused_predicate) continue;
    const auto* fused_filter_node = graph.AddNode(MakeFusedFilterNode(
        *first_filter_node, *second_filter_node, *fused_predicate, &graph));
    // Reroute consumers of the second filter to the fused node, then retire
    // both original filters.
    TF_RETURN_IF_ERROR(graph.UpdateFanouts(second_filter_node->name(),
                                           fused_filter_node->name()));
    TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_predicate));
    nodes_to_delete.insert(first_filter_node->name());
    nodes_to_delete.insert(second_filter_node->name());
    stats->num_changes++;
  }
  TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
  return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterFusion, "filter_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeFilterNode;
// Two back-to-back filters collapse into a single (fused) FilterDataset.
TEST(FilterFusionTest, FuseTwoFilterIntoOne) {
  using test::function::NDef;
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       MakeFilterNode("filter1", "range"),
       MakeFilterNode("filter2", "filter1")},
      {
          test::function::IsZero(),
      });
  FilterFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  // The chain is replaced: a FilterDataset remains but the originals are gone.
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}

// A chain of three filters is fully collapsed as well.
TEST(FilterFusionTest, FuseThreeNodesIntoOne) {
  using test::function::NDef;
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       MakeFilterNode("filter1", "range"), MakeFilterNode("filter2", "filter1"),
       MakeFilterNode("filter3", "filter2"),
       NDef("cache", "CacheDataset", {"filter3", "filename"}, {})},
      {
          test::function::IsZero(),
      });
  FilterFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter3", output));
}
}
}
} |
98 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_THIN_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_THIN_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
// GPU kernel for transposed convolutions with few output channels (<= 4),
// kernel size equal to the stride, and no padding (see
// IsConvolutionTransposedThinSupported). Weights and biases are baked into a
// constant buffer at creation time.
class ConvolutionTransposedThin : public GPUOperation {
 public:
  ConvolutionTransposedThin() = default;
  int3 GetGridSize() const override;
  // Move-only type.
  ConvolutionTransposedThin(ConvolutionTransposedThin&& operation);
  ConvolutionTransposedThin& operator=(ConvolutionTransposedThin&& operation);
  ConvolutionTransposedThin(const ConvolutionTransposedThin&) = delete;
  ConvolutionTransposedThin& operator=(const ConvolutionTransposedThin&) =
      delete;

 private:
  friend ConvolutionTransposedThin CreateConvolutionTransposedThin(
      const GpuInfo& gpu_info, const OperationDef& definition,
      const ConvolutionTransposedAttributes& attr);
  ConvolutionTransposedThin(const OperationDef& definition,
                            const ConvolutionTransposedAttributes& attr,
                            const GpuInfo& gpu_info);
  // Uploads weights + biases into a single constant buffer object "weights".
  template <DataType T>
  void UploadData(const tflite::gpu::Tensor<OHWI, T>& weights,
                  const tflite::gpu::Tensor<Linear, T>& biases);
  // Repacks OHWI weights into the vec4 layout consumed by the shader.
  template <DataType S, typename T>
  void RearrangeWeightsData(const tflite::gpu::Tensor<OHWI, S>& weights,
                            absl::Span<T> dst);
  std::string GenerateConvolutionTransposedCode(const OperationDef& op_def,
                                                int src_depth, int dst_channels,
                                                const int2& kernel_size);
};
// Packs convolution weights and biases into one constant GPU buffer.
// Layout: flt4_count weight vectors (see RearrangeWeightsData) followed by a
// single trailing vec4 holding the per-output-channel biases.
template <DataType T>
void ConvolutionTransposedThin::UploadData(
    const tflite::gpu::Tensor<OHWI, T>& weights,
    const tflite::gpu::Tensor<Linear, T>& biases) {
  const int src_depth = DivideRoundUp(weights.shape.i, 4);
  const int flt4_count =
      weights.shape.w * weights.shape.h * src_depth * weights.shape.o;
  const bool f32_weights = definition_.precision == CalculationsPrecision::F32;
  const int flt4_size = f32_weights ? sizeof(float4) : sizeof(half4);
  BufferDescriptor desc;
  desc.element_type = f32_weights ? DataType::FLOAT32 : DataType::FLOAT16;
  desc.element_size = 4;
  desc.memory_type = MemoryType::CONSTANT;
  // +1 vec4 reserved for the bias vector appended after the weights.
  desc.size = flt4_size * (flt4_count + 1);
  desc.data.resize(desc.size);
  if (f32_weights) {
    float4* gpu_data = reinterpret_cast<float4*>(desc.data.data());
    RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count));
    float4 bias_value(0.0f);
    for (int i = 0; i < weights.shape.o; ++i) {
      bias_value[i] = biases.data[i];
    }
    gpu_data[flt4_count] = bias_value;
  } else {
    // Same packing as above, stored in half precision.
    half4* gpu_data = reinterpret_cast<half4*>(desc.data.data());
    RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count));
    half4 bias_value(0.0f);
    for (int i = 0; i < weights.shape.o; ++i) {
      bias_value[i] = biases.data[i];
    }
    gpu_data[flt4_count] = bias_value;
  }
  args_.AddObject("weights",
                  std::make_unique<BufferDescriptor>(std::move(desc)));
}
// Repacks weights from OHWI order into the buffer layout read by the shader:
// for each source slice s, kernel position (y, x), and output channel j, one
// 4-wide vector holding the weights of input channels [4s, 4s+4), with
// out-of-range channels zero-filled.
template <DataType S, typename T>
void ConvolutionTransposedThin::RearrangeWeightsData(
    const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) {
  const int src_depth = DivideRoundUp(weights.shape.i, 4);
  const int kernel_x = weights.shape.w;
  const int kernel_y = weights.shape.h;
  int counter = 0;
  for (int s = 0; s < src_depth; ++s) {
    for (int y = 0; y < kernel_y; ++y) {
      for (int x = 0; x < kernel_x; ++x) {
        std::vector<T> filters(weights.shape.o);
        for (int j = 0; j < weights.shape.o; ++j) {
          for (int i = 0; i < 4; ++i) {
            const int s_ch = s * 4 + i;
            const int d_ch = j;
            if (s_ch < weights.shape.i && d_ch < weights.shape.o) {
              const int f_index = weights.shape.LinearIndex({d_ch, y, x, s_ch});
              filters[j][i] = weights.data[f_index];
            } else {
              // Zero-pad when the input channel count is not a multiple of 4.
              filters[j][i] = 0.0f;
            }
          }
        }
        for (int j = 0; j < weights.shape.o; ++j) {
          dst[counter++] = filters[j];
        }
      }
    }
  }
}
bool IsConvolutionTransposedThinSupported(
const ConvolutionTransposedAttributes& attr);
ConvolutionTransposedThin CreateConvolutionTransposedThin(
const GpuInfo& gpu_info, const OperationDef& definition,
const ConvolutionTransposedAttributes& attr);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
// Generates the shader code for the given attributes. On Adreno 3xx GPUs with
// F16 precision the kAdrenoFullSimd compiler option is additionally enabled
// (device-specific workaround/tuning).
ConvolutionTransposedThin::ConvolutionTransposedThin(
    const OperationDef& definition, const ConvolutionTransposedAttributes& attr,
    const GpuInfo& gpu_info)
    : GPUOperation(definition) {
  code_ = GenerateConvolutionTransposedCode(
      definition_, DivideRoundUp(attr.weights.shape.i, 4), attr.weights.shape.o,
      int2(attr.weights.shape.w, attr.weights.shape.h));
  if (definition_.precision == CalculationsPrecision::F16 &&
      gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
    compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
  }
}
// Move construction/assignment delegate entirely to the GPUOperation base;
// this class adds no data members of its own.
ConvolutionTransposedThin::ConvolutionTransposedThin(
    ConvolutionTransposedThin&& operation)
    : GPUOperation(std::move(operation)) {}

ConvolutionTransposedThin& ConvolutionTransposedThin::operator=(
    ConvolutionTransposedThin&& operation) {
  if (this != &operation) {
    GPUOperation::operator=(std::move(operation));
  }
  return *this;
}
// Emits the shader source: each work item reads one source pixel (across all
// src_depth slices), accumulates kernel_size.y x kernel_size.x partial
// results, and scatters them to the corresponding block of destination
// pixels, adding the bias vector stored after the weights.
// Fix: the emitted `if (X > ...` line previously contained an unterminated
// string literal (`") {` without a closing `\n";`), which did not compile.
std::string ConvolutionTransposedThin::GenerateConvolutionTransposedCode(
    const OperationDef& op_def, int src_depth, int dst_channels,
    const int2& kernel_size) {
  AddSrcTensor("src_tensor", op_def.src_tensors[0]);
  AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
  const std::string channel_x = dst_channels == 1 ? "" : ".x";
  const std::vector<std::string> postfix = {channel_x, ".y", ".z", ".w"};
  const std::vector<std::string> channel = {".x", ".y", ".z", ".w"};
  const std::string type_postfix =
      dst_channels == 1 ? "" : std::to_string(dst_channels);
  // Accumulator type follows the computation precision.
  std::string accum_type;
  switch (op_def.precision) {
    case CalculationsPrecision::F32:
    case CalculationsPrecision::F32_F16:
      accum_type = "float" + type_postfix;
      break;
    case CalculationsPrecision::F16:
      accum_type = "half" + type_postfix;
      break;
  }
  std::string c;
  c += "MAIN_FUNCTION($0) {\n";
  if (op_def.IsBatchSupported()) {
    c += " int linear_id = GLOBAL_ID_0;\n";
    c += " int X = linear_id / args.dst_tensor.Batch();\n";
    c += " int B = linear_id % args.dst_tensor.Batch();\n";
    c += " args.dst_tensor.SetBatchRef(B);\n";
    c += " args.src_tensor.SetBatchRef(B);\n";
  } else {
    c += " int X = GLOBAL_ID_0;\n";
  }
  c += " int Y = GLOBAL_ID_1;\n";
  c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) "
       "return;\n";
  c += " " + accum_type + " r[" + std::to_string(kernel_size.y) + "][" +
       std::to_string(kernel_size.x) + "];\n";
  // First slice initializes the accumulators.
  c += " {\n";
  c += " FLT4 src = args.src_tensor.Read(X, Y, 0);\n";
  int index = 0;
  for (int y = 0; y < kernel_size.y; ++y) {
    for (int x = 0; x < kernel_size.x; ++x) {
      std::string r_s =
          " r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
      for (int d = 0; d < dst_channels; ++d) {
        c += r_s + postfix[d] + " = dot(src, args.weights.Read(" +
             std::to_string(index) + "));\n";
        index++;
      }
    }
  }
  c += " }\n";
  // Remaining slices accumulate into r.
  for (int i = 1; i < src_depth; ++i) {
    c += " if (X > " + std::to_string(-i) +
         ") {\n";  // fixed: literal was previously unterminated
    c +=
        " FLT4 src = args.src_tensor.Read(X, Y, " + std::to_string(i) + ");\n";
    for (int y = 0; y < kernel_size.y; ++y) {
      for (int x = 0; x < kernel_size.x; ++x) {
        std::string r_s =
            " r[" + std::to_string(y) + "][" + std::to_string(x) + "]";
        for (int d = 0; d < dst_channels; ++d) {
          c += r_s + postfix[d] + " += dot(src, args.weights.Read(" +
               std::to_string(index) + "));\n";
          index++;
        }
      }
    }
    c += " }\n";
  }
  // Scatter: each source pixel maps to a kernel_size block of dst pixels.
  c += " X *= " + std::to_string(kernel_size.x) + ";\n";
  c += " Y *= " + std::to_string(kernel_size.y) + ";\n";
  for (int y = 0; y < kernel_size.y; ++y) {
    for (int x = 0; x < kernel_size.x; ++x) {
      const std::string x_coord = "X + " + std::to_string(x);
      const std::string y_coord = "Y + " + std::to_string(y);
      c += " if (" + x_coord + " < args.dst_tensor.Width() && " + y_coord +
           " < args.dst_tensor.Height()) {\n";
      // The bias vec4 is stored right after the weights (see UploadData).
      c += " FLT4 result = args.weights.Read(" + std::to_string(index) +
           ");\n";
      for (int d = 0; d < dst_channels; ++d) {
        c += " result" + channel[d] + " += r[" + std::to_string(y) + "][" +
             std::to_string(x) + "]" + postfix[d] + ";\n";
      }
      c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord +
           ", 0);\n";
      c += " }\n";
    }
  }
  c += "}\n";
  return c;
}
// One work item per source pixel (times batch); the shader fans each source
// pixel out to a kernel_size block of destination pixels.
int3 ConvolutionTransposedThin::GetGridSize() const {
  const int grid_x = src_[0]->Width() * dst_[0]->Batch();
  const int grid_y = src_[0]->Height();
  const int grid_z = 1;
  return int3(grid_x, grid_y, grid_z);
}
// The "thin" specialization applies when there are at most 4 output
// channels, the kernel exactly matches the stride, and there is no padding
// in either spatial dimension.
bool IsConvolutionTransposedThinSupported(
    const ConvolutionTransposedAttributes& attr) {
  const bool few_output_channels = attr.weights.shape.o <= 4;
  const bool kernel_matches_stride = attr.weights.shape.w == attr.stride.w &&
                                     attr.weights.shape.h == attr.stride.h;
  const bool no_padding =
      attr.padding.prepended.w == 0 && attr.padding.prepended.h == 0 &&
      attr.padding.appended.w == 0 && attr.padding.appended.h == 0;
  return few_output_channels && kernel_matches_stride && no_padding;
}
// Factory: constructs the operation and uploads weights/biases into its
// constant buffer. Callers should first check
// IsConvolutionTransposedThinSupported(attr).
ConvolutionTransposedThin CreateConvolutionTransposedThin(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  ConvolutionTransposedThin result(definition, attr, gpu_info);
  result.UploadData(attr.weights, attr.bias);
  return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
// End-to-end OpenCL execution tests; the actual numeric checks live in the
// shared test util so they can be reused across GPU backends.
TEST_F(OpenCLOperationTest, ConvolutionTransposedThinSimpleWeights) {
  auto status = ConvolutionTransposedThinSimpleWeightsTest(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}

TEST_F(OpenCLOperationTest, ConvolutionTransposedThin) {
  auto status = ConvolutionTransposedThinTest(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} |
99 | #ifndef AROLLA_QEXPR_OPERATORS_STRINGS_FORMAT_H_
#define AROLLA_QEXPR_OPERATORS_STRINGS_FORMAT_H_
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/bytes.h"
namespace arolla {
constexpr absl::string_view kFormatOperatorName = "strings.format";
// Operator family implementing "strings.format". The first argument is a
// printf-style format spec (Bytes); remaining arguments are substituted into
// it. If any argument is optional, the result is optional and is missing
// whenever any input is missing.
class FormatOperatorFamily : public OperatorFamily {
 public:
  using returns_status_or = std::true_type;

  // Functor-style evaluation path (used directly, e.g. from codegen).
  template <class... Args>
  auto operator()(const Args&... args) const {
    if constexpr ((is_optional_v<Args> || ...)) {
      // Optional inputs: format only when all are present; otherwise return
      // an empty OptionalValue.
      if ((ArgPresent(args) && ...)) {
        auto res_or = FormatImpl(ArgValue(args)...);
        if (res_or.ok()) {
          return absl::StatusOr<OptionalValue<Bytes>>(res_or.value());
        } else {
          return absl::StatusOr<OptionalValue<Bytes>>(res_or.status());
        }
      } else {
        return absl::StatusOr<OptionalValue<Bytes>>(OptionalValue<Bytes>());
      }
    } else {
      return FormatImpl(args...);
    }
  }

 private:
  // True when `arg` carries a value (always true for non-optional types).
  template <typename T>
  bool ArgPresent(const T& arg) const {
    if constexpr (is_optional_v<T>) {
      return arg.present;
    } else {
      return true;
    }
  }

  // Unwraps an optional argument; passes non-optional arguments through.
  template <typename T>
  const auto& ArgValue(const T& arg) const {
    if constexpr (is_optional_v<T>) {
      return arg.value;
    } else {
      return arg;
    }
  }

  // The set of types accepted as format arguments.
  template <typename T>
  static constexpr bool IsSupportedArgType() {
    return std::is_same_v<T, int32_t> || std::is_same_v<T, int64_t> ||
           std::is_same_v<T, float> || std::is_same_v<T, double> ||
           std::is_same_v<T, Bytes> || std::is_same_v<T, bool>;
  }

  // Metafunction yielding the first unsupported type in Args (or the last
  // type if all are supported); used only to build the error message below.
  template <typename... Args>
  struct FirstUnsupported;

  template <typename Arg>
  struct FirstUnsupported<Arg> {
    using type = Arg;
  };

  template <typename Arg, typename... Args>
  struct FirstUnsupported<Arg, Args...> {
    using type =
        std::conditional_t<IsSupportedArgType<Arg>(),
                           typename FirstUnsupported<Args...>::type, Arg>;
  };

  // Applies `format_spec` to `args` via absl::FormatUntyped.
  template <class... Args>
  absl::StatusOr<Bytes> FormatImpl(const Bytes& format_spec,
                                   const Args&... args) const {
    if constexpr ((IsSupportedArgType<Args>() && ...)) {
      absl::UntypedFormatSpec fmt(format_spec);
      std::string out;
      if (absl::FormatUntyped(&out, fmt, {absl::FormatArg(args)...})) {
        return Bytes(std::move(out));
      } else {
        return absl::InvalidArgumentError(absl::StrFormat(
            "format specification '%s' doesn't match format arguments",
            format_spec));
      }
    } else {
      return absl::InvalidArgumentError(absl::StrFormat(
          "%s is not a supported format argument type",
          GetQType<typename FirstUnsupported<Args...>::type>()->name()));
    }
  }

  absl::StatusOr<OperatorPtr> DoGetOperator(
      absl::Span<const QTypePtr> input_types, QTypePtr output_type) const final;
};
}
#endif
#include "arolla/qexpr/operators/strings/format.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operator_errors.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
template <typename T>
using Slot = FrameLayout::Slot<T>;
// Keeps string_view copies alive for the duration of one format call.
// A deque is used because it never relocates existing elements, so the
// references returned by AddValue stay valid as more values are appended.
class ValueHolder {
 public:
  // Stores `value` and returns a stable reference to the stored copy.
  const absl::string_view& AddValue(absl::string_view value) {
    return values_.emplace_back(value);
  }

 private:
  std::deque<absl::string_view> values_;
};
// Reinterprets `source` as a T and wraps it into an absl::FormatArg.
template <typename T>
absl::FormatArg WrapValueImpl(const void* source, ValueHolder*) {
  const T& value_ref = *(reinterpret_cast<const T*>(source));
  return absl::FormatArg(value_ref);
}

// Bytes needs special handling: FormatArg does not own its argument, so a
// string_view into the Bytes is parked in `value_holder` to keep the
// referenced view alive while formatting runs.
template <>
absl::FormatArg WrapValueImpl<Bytes>(const void* source,
                                     ValueHolder* value_holder) {
  const Bytes& value_ref = *(reinterpret_cast<const Bytes*>(source));
  return absl::FormatArg(value_holder->AddValue(value_ref));
}
// Signature shared by the WrapValueImpl family above.
using WrapValueFn = absl::FormatArg (*)(const void*, ValueHolder*);

// Maps a QType to the wrapper that converts raw slot memory into a
// FormatArg; returns an error for unsupported format argument types.
absl::StatusOr<WrapValueFn> GetWrapValueFn(QTypePtr qtype) {
  // Lazily-initialized, never-destroyed lookup table.
  static const Indestructible<absl::flat_hash_map<QTypePtr, WrapValueFn>>
      converter_map([](void* self) {
        new (self) absl::flat_hash_map<QTypePtr, WrapValueFn>{
            {GetQType<int32_t>(), &WrapValueImpl<int32_t>},
            {GetQType<int64_t>(), &WrapValueImpl<int64_t>},
            {GetQType<float>(), &WrapValueImpl<float>},
            {GetQType<double>(), &WrapValueImpl<double>},
            {GetWeakFloatQType(), &WrapValueImpl<double>},
            {GetQType<Bytes>(), &WrapValueImpl<Bytes>},
            {GetQType<bool>(), &WrapValueImpl<bool>}};
      });
  auto iter = converter_map->find(qtype);
  if (iter == converter_map->end()) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "%s is not a supported format argument type", qtype->name()));
  }
  return iter->second;
}
// Binds a frame slot to the wrapper that turns its value into a FormatArg.
class SlotFormatter {
 public:
  // Creates a formatter for `slot`, or an error if its type is unsupported.
  static absl::StatusOr<SlotFormatter> Create(TypedSlot slot) {
    ASSIGN_OR_RETURN(auto wrap_value_fn, GetWrapValueFn(slot.GetType()));
    return SlotFormatter(slot, wrap_value_fn);
  }

  // Reads the slot from `frame` and returns it as a FormatArg. Any backing
  // storage needed (e.g. for Bytes) is kept alive in `value_holder`.
  absl::FormatArg Format(FramePtr frame, ValueHolder* value_holder) const {
    TypedRef ref = TypedRef::FromSlot(slot_, frame);
    return wrap_value_fn_(ref.GetRawPointer(), value_holder);
  }

 private:
  SlotFormatter(TypedSlot slot, WrapValueFn wrap_value_fn)
      : slot_(slot), wrap_value_fn_(wrap_value_fn) {}

  TypedSlot slot_;
  WrapValueFn wrap_value_fn_;
};
// Bound operator that formats the spec read from `format_spec_slot_` with
// the values produced by `slot_formatters_`, writing the Bytes result to
// `output_slot_`.
class FormatBoundOperator : public BoundOperator {
 public:
  FormatBoundOperator(Slot<Bytes> format_spec_slot,
                      std::vector<SlotFormatter> slot_formatters,
                      Slot<Bytes> output_slot)
      : format_spec_slot_(format_spec_slot),
        slot_formatters_(std::move(slot_formatters)),
        output_slot_(output_slot) {}

  void Run(EvaluationContext* ctx, FramePtr frame) const override {
    absl::string_view fmt_spec = frame.Get(format_spec_slot_);
    absl::UntypedFormatSpec fmt(fmt_spec);
    // value_holder outlives fmt_args: it owns the string_views that Bytes
    // arguments are wrapped around.
    ValueHolder value_holder;
    std::vector<absl::FormatArg> fmt_args;
    fmt_args.reserve(slot_formatters_.size());
    for (const auto& slot_formatter : slot_formatters_) {
      fmt_args.push_back(slot_formatter.Format(frame, &value_holder));
    }
    std::string out;
    if (absl::FormatUntyped(&out, fmt, fmt_args)) {
      frame.Set(output_slot_, Bytes(std::move(out)));
    } else {
      // Spec/argument mismatch is surfaced through the evaluation context.
      ctx->set_status(absl::InvalidArgumentError(absl::StrFormat(
          "format specification '%s' doesn't match format arguments",
          fmt_spec)));
    }
  }

 private:
  Slot<Bytes> format_spec_slot_;
  std::vector<SlotFormatter> slot_formatters_;
  Slot<Bytes> output_slot_;
};
// QExprOperator for "strings.format", evaluated over frame slots.
class FormatOperator : public QExprOperator {
 public:
  explicit FormatOperator(const QExprOperatorSignature* type)
      : QExprOperator(std::string(kFormatOperatorName), type) {}

 private:
  absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
      absl::Span<const TypedSlot> typed_input_slots,
      TypedSlot typed_output_slot) const override {
    // Presence slots of all optional inputs; the output is present only when
    // every one of them is true.
    std::vector<Slot<bool>> presence_slots;
    // Placeholder slot; always reassigned below before use.
    Slot<Bytes> format_spec_slot = Slot<Bytes>::UnsafeSlotFromOffset(
        0);
    if (IsOptionalQType(typed_input_slots[0].GetType())) {
      // Optional format spec: subslot 0 is presence, subslot 1 the value.
      DCHECK_EQ(typed_input_slots[0].SubSlotCount(), 2);
      ASSIGN_OR_RETURN(auto presence_slot,
                       typed_input_slots[0].SubSlot(0).ToSlot<bool>());
      presence_slots.push_back(presence_slot);
      ASSIGN_OR_RETURN(format_spec_slot,
                       typed_input_slots[0].SubSlot(1).ToSlot<Bytes>());
    } else {
      ASSIGN_OR_RETURN(format_spec_slot, typed_input_slots[0].ToSlot<Bytes>());
    }
    auto arg_slots = typed_input_slots.subspan(1);
    std::vector<SlotFormatter> slot_formatters;
    slot_formatters.reserve(arg_slots.size());
    for (auto arg_slot : arg_slots) {
      TypedSlot value_slot = arg_slot;
      if (IsOptionalQType(arg_slot.GetType())) {
        ASSIGN_OR_RETURN(Slot<bool> presence_slot,
                         GetPresenceSubslotFromOptional(arg_slot));
        presence_slots.push_back(presence_slot);
        ASSIGN_OR_RETURN(value_slot, GetValueSubslotFromOptional(arg_slot));
      }
      ASSIGN_OR_RETURN(auto slot_formatter, SlotFormatter::Create(value_slot));
      slot_formatters.push_back(slot_formatter);
    }
    if (presence_slots.empty()) {
      // Fully non-optional inputs: bind the plain formatter.
      ASSIGN_OR_RETURN(Slot<Bytes> output_slot,
                       typed_output_slot.ToSlot<Bytes>());
      return {std::make_unique<FormatBoundOperator>(
          format_spec_slot, slot_formatters, output_slot)};
    } else {
      // Optional inputs: run the formatter only when every presence slot is
      // set, and write the combined presence into the optional output.
      ASSIGN_OR_RETURN(Slot<OptionalValue<Bytes>> output_slot,
                       typed_output_slot.ToSlot<OptionalValue<Bytes>>());
      FormatBoundOperator format_op(format_spec_slot, slot_formatters,
                                    GetValueSubslotFromOptional(output_slot));
      return {std::unique_ptr<BoundOperator>(new WhereAllBoundOperator(
          presence_slots, GetPresenceSubslotFromOptional(output_slot),
          std::move(format_op)))};
    }
  }
};
}
// Validates input types and instantiates a FormatOperator. The result type is
// OPTIONAL_BYTES when any input is optional, otherwise BYTES.
absl::StatusOr<OperatorPtr> FormatOperatorFamily::DoGetOperator(
    absl::Span<const QTypePtr> input_types, QTypePtr output_type) const {
  if (input_types.empty()) {
    return OperatorNotDefinedError(kFormatOperatorName, input_types,
                                   "expected at least 1 argument");
  }
  if (DecayOptionalQType(input_types[0]) != GetQType<Bytes>()) {
    return OperatorNotDefinedError(kFormatOperatorName, input_types,
                                   "format_spec must have BYTES QType");
  }
  bool has_optional_arg = IsOptionalQType(input_types[0]);
  for (size_t i = 1; i < input_types.size(); ++i) {
    QTypePtr value_type = input_types[i];
    if (IsOptionalQType(value_type)) {
      has_optional_arg = true;
      value_type = DecayOptionalQType(value_type);
    }
    // Reject unsupported argument types early, at operator-lookup time.
    RETURN_IF_ERROR(GetWrapValueFn(value_type).status());
  }
  QTypePtr result_type =
      has_optional_arg ? GetQType<OptionalValue<Bytes>>() : GetQType<Bytes>();
  return EnsureOutputQTypeMatches(
      OperatorPtr(std::make_unique<FormatOperator>(
          QExprOperatorSignature::Get(input_types, result_type))),
      input_types, output_type);
}
} | #include "arolla/qexpr/operators/strings/format.h"
#include <cstdint>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/util/bytes.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/text.h"
namespace arolla {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
namespace {
template <typename EvalAsFunctor>
class FormatTest : public ::testing::Test {
public:
template <typename... Args>
absl::StatusOr<Bytes> InvokeOperator(const Args&... args) {
if constexpr (EvalAsFunctor::value) {
auto result = FormatOperatorFamily{}(args...);
static_assert(std::is_same_v<decltype(result), absl::StatusOr<Bytes>>);
return result;
} else {
return ::arolla::InvokeOperator<Bytes>("strings._format_bytes", args...);
}
}
template <typename... Args>
absl::StatusOr<OptionalValue<Bytes>> InvokeOperatorOptional(
const Args&... args) {
if constexpr (EvalAsFunctor::value) {
auto result = FormatOperatorFamily{}(args...);
static_assert(std::is_same_v<decltype(result),
absl::StatusOr<OptionalValue<Bytes>>>);
return result;
} else {
return ::arolla::InvokeOperator<OptionalValue<Bytes>>(
"strings._format_bytes", args...);
}
}
};
TYPED_TEST_SUITE_P(FormatTest);
// Verifies %f-style conversions for both float and double arguments.
TYPED_TEST_P(FormatTest, FormatFloats) {
  const float x = 20.5f;
  const double y = 3.75;
  const Bytes spec("a=%0.2f b=%0.3f");
  EXPECT_THAT(this->InvokeOperator(spec, x, y),
              IsOkAndHolds(Bytes("a=20.50 b=3.750")));
}
// Verifies %d-style conversions for int32 and int64 arguments, including a
// zero-padded width specifier.
TYPED_TEST_P(FormatTest, FormatIntegers) {
  const int32_t small = 3;
  const int64_t big = 4;
  const Bytes spec("c=%02d, d=%d");
  EXPECT_THAT(this->InvokeOperator(spec, small, big),
              IsOkAndHolds(Bytes("c=03, d=4")));
}
// Verifies %s substitution of Bytes arguments interleaved with an integer.
TYPED_TEST_P(FormatTest, FormatText) {
  const Bytes spec("%s is %d years older than %s.");
  const Bytes older("Sophie");
  const Bytes younger("Katie");
  EXPECT_THAT(this->InvokeOperator(spec, older, 2, younger),
              IsOkAndHolds(Bytes("Sophie is 2 years older than Katie.")));
}
// Verifies optional semantics: the result is present iff the format spec and
// every argument are present.
TYPED_TEST_P(FormatTest, FormatOptional) {
  const Bytes spec("The atomic weight of %s is %0.3f");
  const OptionalValue<Bytes> expected("The atomic weight of Iron is 55.845");
  // All arguments present, plain (non-optional) format spec.
  EXPECT_THAT(this->InvokeOperatorOptional(spec, OptionalValue<Bytes>("Iron"),
                                           OptionalValue<float>(55.845)),
              IsOkAndHolds(expected));
  // All arguments present, optional format spec.
  EXPECT_THAT(this->InvokeOperatorOptional(OptionalValue<Bytes>(spec),
                                           OptionalValue<Bytes>("Iron"),
                                           OptionalValue<float>(55.845)),
              IsOkAndHolds(expected));
  // A missing argument makes the result missing.
  EXPECT_THAT(
      this->InvokeOperatorOptional(spec, OptionalValue<Bytes>("Unobtainium"),
                                   OptionalValue<float>{}),
      IsOkAndHolds(OptionalValue<Bytes>{}));
  // A missing format spec makes the result missing.
  EXPECT_THAT(this->InvokeOperatorOptional(OptionalValue<Bytes>(),
                                           OptionalValue<Bytes>("Unobtainium"),
                                           OptionalValue<float>{0}),
              IsOkAndHolds(OptionalValue<Bytes>{}));
}
// Arguments supplied in an order that disagrees with the format spec must be
// rejected with InvalidArgument rather than silently coerced.
TYPED_TEST_P(FormatTest, FormatMismatchedTypes) {
  const Bytes spec("%s's atomic weight is %f");
  EXPECT_THAT(this->InvokeOperator(spec, 1.0079, Bytes("Hydrogen")),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("doesn't match format arguments")));
}
// Text (as opposed to Bytes) is not an accepted format-argument type and must
// produce an InvalidArgument error naming the offending type.
TYPED_TEST_P(FormatTest, FormatUnsupportedType) {
  const Bytes spec("Payload is %s.");
  const Text payload("abc");
  EXPECT_THAT(
      this->InvokeOperator(spec, payload),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("TEXT is not a supported format argument type")));
}
// Enumerate every test case that belongs to the parameterized suite; a case
// omitted here would silently never run.
REGISTER_TYPED_TEST_SUITE_P(FormatTest, FormatFloats, FormatIntegers,
                            FormatText, FormatOptional, FormatMismatchedTypes,
                            FormatUnsupportedType);
// Instantiate the suite twice: once against the registered QExpr operator
// (bool_constant<false>) and once against functor-style evaluation
// (bool_constant<true>).
INSTANTIATE_TYPED_TEST_SUITE_P(Operator, FormatTest, std::bool_constant<false>);
INSTANTIATE_TYPED_TEST_SUITE_P(Functor, FormatTest, std::bool_constant<true>);
}
} |