ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
1ce4f09f-ee50-4e20-8d02-024686a491c7 | cpp | google/tensorstore | schema | tensorstore/proto/schema.cc | tensorstore/proto/schema_test.cc | #include "tensorstore/proto/schema.h"
#include <stddef.h>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/proto/array.h"
#include "tensorstore/proto/index_transform.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/batch.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/unit.h"
namespace tensorstore {
namespace {
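// Encodes an optional physical unit into its proto representation; an unset
// unit leaves the proto fields at their defaults.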
void EncodeToProto(::tensorstore::proto::OptionalUnit& proto,
const std::optional<Unit>& unit) {
if (unit.has_value()) {
proto.set_base_unit(unit->base_unit);
proto.set_multiplier(unit->multiplier);
}
}
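// Returns true if the grid view specifies at least one constraint (shape,
// aspect ratio, or elements).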
bool IsValidGridView(ChunkLayout::GridView view) {
return (view.aspect_ratio().valid() || view.elements().valid() ||
view.shape().valid());
}
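// Encodes a ChunkLayout into its proto representation.  Soft (non-hard)
// constraints are recorded in per-dimension bitsets alongside the values.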
void EncodeToProto(::tensorstore::proto::ChunkLayout& proto,
const ChunkLayout& chunk_layout) {
auto encode_grid =
[](::tensorstore::proto::ChunkLayout::Grid& proto,
ChunkLayout::GridView grid_view) {
{
DimensionSet soft_constraints(false);
auto shape = grid_view.shape();
for (size_t i = 0; i < shape.size(); i++) {
proto.add_shape(shape[i]);
soft_constraints[i] = !shape.hard_constraint[i];
}
if (soft_constraints) {
proto.set_shape_soft_constraint_bitset(soft_constraints.to_uint());
}
}
{
DimensionSet soft_constraints(false);
auto aspect_ratio = grid_view.aspect_ratio();
for (size_t i = 0; i < aspect_ratio.size(); i++) {
proto.add_aspect_ratio(aspect_ratio[i]);
soft_constraints[i] = !aspect_ratio.hard_constraint[i];
}
if (soft_constraints) {
proto.set_aspect_ratio_soft_constraint_bitset(
soft_constraints.to_uint());
}
}
if (grid_view.elements().valid()) {
proto.set_elements(grid_view.elements().value);
if (!grid_view.elements().hard_constraint) {
proto.set_elements_soft_constraint(true);
}
}
};
{
DimensionSet grid_origin_soft_constraint_bitset(false);
auto grid_origin = chunk_layout.grid_origin();
for (size_t i = 0; i < grid_origin.size(); i++) {
proto.add_grid_origin(grid_origin[i]);
grid_origin_soft_constraint_bitset[i] = !grid_origin.hard_constraint[i];
}
if (grid_origin_soft_constraint_bitset) {
proto.set_grid_origin_soft_constraint_bitset(
grid_origin_soft_constraint_bitset.to_uint());
}
}
{
auto inner_order = chunk_layout.inner_order();
if (!inner_order.hard_constraint) {
proto.set_inner_order_soft_constraint(true);
}
for (size_t i = 0; i < inner_order.size(); i++) {
proto.add_inner_order(inner_order[i]);
}
}
if (IsValidGridView(chunk_layout.read_chunk())) {
encode_grid(*proto.mutable_read_chunk(), chunk_layout.read_chunk());
}
if (IsValidGridView(chunk_layout.write_chunk())) {
encode_grid(*proto.mutable_write_chunk(), chunk_layout.write_chunk());
}
if (IsValidGridView(chunk_layout.codec_chunk())) {
encode_grid(*proto.mutable_codec_chunk(), chunk_layout.codec_chunk());
}
}
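// Reconstructs a ChunkLayout from its proto representation, restoring the
// hard/soft constraint distinctions from the stored bitsets.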
Result<ChunkLayout> ParseChunkLayoutFromProto(
const ::tensorstore::proto::ChunkLayout& proto) {
auto parse_grid = [](const ::tensorstore::proto::ChunkLayout::Grid& proto)
-> Result<ChunkLayout::Grid> {
ChunkLayout::Grid grid;
if (proto.shape_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.shape_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Shape(
tensorstore::span(proto.shape()), ~soft_constraints)));
}
if (proto.aspect_ratio_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.aspect_ratio_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::AspectRatio(
tensorstore::span(proto.aspect_ratio()), ~soft_constraints)));
}
if (proto.has_elements()) {
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Elements(
proto.elements(), !proto.elements_soft_constraint())));
}
return grid;
};
ChunkLayout chunk_layout;
if (proto.grid_origin_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.grid_origin_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::GridOrigin(
tensorstore::span(proto.grid_origin()), ~soft_constraints)));
}
if (proto.inner_order_size() > 0) {
std::vector<DimensionIndex> inner_order(proto.inner_order().begin(),
proto.inner_order().end());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::InnerOrder(
inner_order, !proto.inner_order_soft_constraint())));
}
if (proto.has_read_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.read_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kRead>(grid)));
}
if (proto.has_write_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.write_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kWrite>(grid)));
}
if (proto.has_codec_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.codec_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kCodec>(grid)));
}
return chunk_layout;
}
}
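// Encodes a Schema into its proto representation.  Only fields with valid
// values are set; the codec is stored in serialized form.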
void EncodeToProto(::tensorstore::proto::Schema& proto,
const Schema& schema) {
if (DimensionIndex rank = schema.rank(); rank != dynamic_rank) {
proto.set_rank(rank);
}
if (DataType dtype = schema.dtype(); dtype.valid()) {
proto.set_dtype(std::string(dtype.name()));
}
if (IndexDomain<> domain = schema.domain(); domain.valid()) {
EncodeToProto(*proto.mutable_domain(), domain);
}
EncodeToProto(*proto.mutable_chunk_layout(), schema.chunk_layout());
if (Schema::FillValue fill_value = schema.fill_value(); fill_value.valid()) {
EncodeToProto(*proto.mutable_fill_value(), fill_value);
}
if (CodecSpec codec = schema.codec(); codec.valid()) {
auto serialized = tensorstore::serialization::EncodeBatch(schema.codec());
proto.set_codec(serialized.value());
}
if (Schema::DimensionUnits dimension_units = schema.dimension_units();
dimension_units.valid()) {
for (const auto& unit : dimension_units) {
EncodeToProto(*proto.add_dimension_unit(), unit);
}
}
}
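// Parses a Schema from its proto representation, validating each field as it
// is applied.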
Result<Schema> ParseSchemaFromProto(const ::tensorstore::proto::Schema& proto) {
Schema schema;
if (proto.has_rank()) {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(RankConstraint(proto.rank())));
}
if (proto.has_dtype() && !proto.dtype().empty()) {
auto dtype = GetDataType(proto.dtype());
if (!dtype.valid()) {
return absl::InvalidArgumentError("dtype is not valid");
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(dtype));
}
if (proto.has_domain()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto domain,
ParseIndexDomainFromProto(proto.domain()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(domain));
}
if (proto.has_chunk_layout()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto chunk_layout, ParseChunkLayoutFromProto(proto.chunk_layout()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(chunk_layout));
}
if (proto.has_codec()) {
CodecSpec codec;
TENSORSTORE_RETURN_IF_ERROR(
tensorstore::serialization::DecodeBatch(proto.codec(), codec));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(codec));
}
if (proto.has_fill_value()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto array, ParseArrayFromProto(proto.fill_value(), zero_origin));
TENSORSTORE_ASSIGN_OR_RETURN(auto fill_value,
ArrayOriginCast<zero_origin>(array));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::FillValue(fill_value)));
}
if (!proto.dimension_unit().empty()) {
DimensionUnitsVector dimensions;
for (size_t i = 0; i < proto.dimension_unit_size(); i++) {
auto& unit = proto.dimension_unit(i);
if (unit.has_multiplier() || !unit.base_unit().empty()) {
dimensions.emplace_back(std::in_place, unit.multiplier(),
unit.base_unit());
} else {
dimensions.emplace_back(std::nullopt);
}
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::DimensionUnits(dimensions)));
}
return schema;
}
} | #include "tensorstore/proto/schema.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/schema.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ParseSchemaFromProto;
using ::tensorstore::Schema;
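// Constructs a proto message of type `Proto` from its text-format
// representation (partial messages are permitted).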
template <typename Proto>
Proto ParseProtoOrDie(const std::string& asciipb) {
return protobuf_matchers::internal::MakePartialProtoFromAscii<Proto>(asciipb);
}
auto DoEncode(const Schema& schema) {
::tensorstore::proto::Schema proto;
::tensorstore::EncodeToProto(proto, schema);
return proto;
}
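// Round-trips a fully populated schema through the proto representation.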
TEST(SchemaProtoTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
Schema::FromJson(
{
{"rank", 3},
{"dtype", "uint8"},
{"domain",
{{"labels", {"x", "y", "z"}},
{"inclusive_min", {1, 2, 3}},
{"exclusive_max", {5, 6, 7}}}},
{"chunk_layout",
{
{"codec_chunk",
{
{"elements_soft_constraint", 20},
{"aspect_ratio", {1, 2, 3}},
{"shape", {nullptr, 4, 5}},
}},
{"read_chunk",
{
{"elements", 30},
{"aspect_ratio", {4, 5, 6}},
{"shape_soft_constraint", {6, nullptr, 7}},
}},
{"write_chunk",
{
{"elements", 40},
{"aspect_ratio_soft_constraint", {7, 8, 9}},
{"shape", {8, 9, nullptr}},
}},
{"grid_origin", {nullptr, nullptr, 11}},
{"inner_order_soft_constraint", {2, 0, 1}},
}},
{"fill_value", 5},
{"dimension_units", {{4, "nm"}, nullptr, {30, "nm"}}},
}));
auto proto = ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 3
dtype: "uint8"
domain {
origin: [ 1, 2, 3 ]
shape: [ 4, 4, 4 ]
labels: [ "x", "y", "z" ]
}
chunk_layout {
grid_origin: [ -9223372036854775808, -9223372036854775808, 11 ]
grid_origin_soft_constraint_bitset: 3
inner_order: [ 2, 0, 1 ]
inner_order_soft_constraint: true
write_chunk {
aspect_ratio: [ 7, 8, 9 ]
shape: [ 8, 9, 0 ]
elements: 40
aspect_ratio_soft_constraint_bitset: 7
shape_soft_constraint_bitset: 4
}
read_chunk {
shape: [ 6, 0, 7 ]
elements: 30
aspect_ratio: [ 4, 5, 6 ]
shape_soft_constraint_bitset: 7
}
codec_chunk {
elements: 20
shape: [ 0, 4, 5 ]
aspect_ratio: [ 1, 2, 3 ]
elements_soft_constraint: true
shape_soft_constraint_bitset: 1
}
}
fill_value { dtype: "uint8" void_data: "\x05" }
dimension_unit { multiplier: 4 base_unit: "nm" }
dimension_unit {}
dimension_unit { multiplier: 30 base_unit: "nm" }
)pb");
EXPECT_THAT(DoEncode(schema), EqualsProto(proto));
EXPECT_THAT(ParseSchemaFromProto(proto), testing::Eq(schema));
}
TEST(SchemaProtoTest, Empty) {
tensorstore::Schema schema;
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, RankFromDimensionUnit) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 1
dimension_unit {}
)pb")));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dimension_unit {}
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, Errors) {
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: -2
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dtype: "foo"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
codec: "12345"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/schema.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/schema_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
61064559-7093-4c01-8255-fe9997c3c16e | cpp | google/tensorstore | index_interval | tensorstore/index_interval.cc | tensorstore/index_interval_test.cc | #include "tensorstore/index_interval.h"
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
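// Constructs the closed interval [inclusive_min, inclusive_max], returning an
// error if the bounds do not form a valid interval.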
Result<IndexInterval> IndexInterval::Closed(Index inclusive_min,
Index inclusive_max) {
if (!ValidClosed(inclusive_min, inclusive_max)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("(", inclusive_min, ", ", inclusive_max,
") do not specify a valid closed index interval"));
}
return UncheckedClosed(inclusive_min, inclusive_max);
}
Result<IndexInterval> IndexInterval::HalfOpen(Index inclusive_min,
Index exclusive_max) {
if (!ValidHalfOpen(inclusive_min, exclusive_max)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"(", inclusive_min, ", ", exclusive_max,
") do not specify a valid half-open index interval"));
}
return UncheckedHalfOpen(inclusive_min, exclusive_max);
}
Result<IndexInterval> IndexInterval::Sized(Index inclusive_min, Index size) {
if (!ValidSized(inclusive_min, size)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("(", inclusive_min, ", ", size,
") do not specify a valid sized index interval"));
}
return UncheckedSized(inclusive_min, size);
}
std::ostream& operator<<(std::ostream& os,
const OptionallyImplicitIndexInterval& x) {
if (x.inclusive_min() == -kInfIndex) {
os << "(-inf";
} else {
os << '[' << x.inclusive_min();
}
if (x.implicit_lower()) os << '*';
os << ", ";
if (x.inclusive_max() == +kInfIndex) {
os << "+inf";
} else {
os << x.exclusive_max();
}
if (x.implicit_upper()) os << '*';
return os << ")";
}
std::ostream& operator<<(std::ostream& os, IndexInterval x) {
return os << OptionallyImplicitIndexInterval(x, false, false);
}
namespace {
template <ContainerKind CKindA, ContainerKind CKindB>
bool EqualImpl(const IndexDomainDimension<CKindA>& a,
const IndexDomainDimension<CKindB>& b) {
return (a.optionally_implicit_interval() ==
b.optionally_implicit_interval() &&
a.label() == b.label());
}
}
bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<container>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<view>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<container>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<view>& b) {
return EqualImpl(a, b);
}
std::ostream& operator<<(std::ostream& os,
const IndexDomainDimension<view>& x) {
if (!x.label().empty()) {
os << QuoteString(x.label()) << ": ";
}
return os << x.optionally_implicit_interval();
}
std::ostream& operator<<(std::ostream& os,
const IndexDomainDimension<container>& x) {
return os << IndexDomainDimension<view>(x);
}
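// Two intervals are compatible if each pair of corresponding bounds is either
// equal or unbounded on at least one side.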
bool AreCompatibleOrUnbounded(IndexInterval a, IndexInterval b) {
Index a_lower = a.inclusive_min();
Index a_upper = a.inclusive_max();
Index b_lower = b.inclusive_min();
Index b_upper = b.inclusive_max();
return (a_lower == b_lower || a_lower == -kInfIndex ||
b_lower == -kInfIndex) &&
(a_upper == b_upper || a_upper == kInfIndex || b_upper == kInfIndex);
}
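// Returns the smallest interval containing both `a` and `b`; an empty
// argument is ignored.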
IndexInterval Hull(IndexInterval a, IndexInterval b) {
if (a.empty()) return b;
if (b.empty()) return a;
const Index lower = std::min(a.inclusive_min(), b.inclusive_min());
const Index upper = std::max(a.inclusive_max(), b.inclusive_max());
return IndexInterval::UncheckedClosed(lower, upper);
}
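// Returns the intersection of `a` and `b`, which may be empty.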
IndexInterval Intersect(IndexInterval a, IndexInterval b) {
const Index lower = std::max(a.inclusive_min(), b.inclusive_min());
const Index upper = std::min(a.inclusive_max(), b.inclusive_max());
const Index size = upper < lower ? 0 : upper - lower + 1;
return IndexInterval::UncheckedSized(lower, size);
}
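// For optionally-implicit intervals, each implicit flag of the result is
// taken from whichever input supplied the selected bound (and ANDed when the
// bounds coincide).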
OptionallyImplicitIndexInterval Hull(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b) {
IndexInterval interval = Hull(a.interval(), b.interval());
bool implicit_lower = (a.inclusive_min() == b.inclusive_min())
? (a.implicit_lower() && b.implicit_lower())
: (interval.inclusive_min() == a.inclusive_min()
? a.implicit_lower()
: b.implicit_lower());
bool implicit_upper = (a.inclusive_max() == b.inclusive_max())
? (a.implicit_upper() && b.implicit_upper())
: (a.inclusive_max() == interval.inclusive_max()
? a.implicit_upper()
: b.implicit_upper());
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
OptionallyImplicitIndexInterval Intersect(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b) {
IndexInterval interval = Intersect(a.interval(), b.interval());
bool implicit_lower = (a.inclusive_min() == b.inclusive_min())
? (a.implicit_lower() && b.implicit_lower())
: (interval.inclusive_min() == a.inclusive_min()
? a.implicit_lower()
: b.implicit_lower());
bool implicit_upper = (a.inclusive_max() == b.inclusive_max())
? (a.implicit_upper() && b.implicit_upper())
: (a.inclusive_max() == interval.inclusive_max()
? a.implicit_upper()
: b.implicit_upper());
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
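// Intersects two optionally-implicit intervals.  When exactly one of a pair
// of corresponding bounds is implicit, the implicit bound is treated as
// unbounded so that the explicit bound takes precedence.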
OptionallyImplicitIndexInterval IntersectPreferringExplicit(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b) {
const Index inclusive_min =
a.implicit_lower() == b.implicit_lower()
? std::max(a.inclusive_min(), b.inclusive_min())
: std::max(a.effective_interval().inclusive_min(),
b.effective_interval().inclusive_min());
const Index inclusive_max =
a.implicit_upper() == b.implicit_upper()
? std::min(a.inclusive_max(), b.inclusive_max())
: std::min(a.effective_interval().inclusive_max(),
b.effective_interval().inclusive_max());
return OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(
inclusive_min, std::max(inclusive_min - 1, inclusive_max)),
a.implicit_lower() && b.implicit_lower(),
a.implicit_upper() && b.implicit_upper()};
}
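// Like Contains, but an unbounded inner bound is always considered contained.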
bool ContainsOrUnbounded(IndexInterval outer, IndexInterval inner) {
return (inner.inclusive_min() == -kInfIndex ||
inner.inclusive_min() >= outer.inclusive_min()) &&
(inner.inclusive_max() == kInfIndex ||
inner.inclusive_max() <= outer.inclusive_max());
}
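// Shifts the lower and upper bounds by min_offset and max_offset
// respectively, checking for overflow; infinite bounds are left unchanged.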
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index min_offset,
Index max_offset) {
Index inclusive_min;
if (interval.inclusive_min() == -kInfIndex) {
inclusive_min = -kInfIndex;
} else if (internal::AddOverflow(interval.inclusive_min(), min_offset,
&inclusive_min) ||
!IsFiniteIndex(inclusive_min)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
interval.inclusive_min(), " + ", min_offset, " is outside valid range ",
IndexInterval::FiniteRange()));
}
Index inclusive_max;
if (interval.inclusive_max() == kInfIndex) {
inclusive_max = kInfIndex;
} else if (internal::AddOverflow(interval.inclusive_max(), max_offset,
&inclusive_max) ||
!IsFiniteIndex(inclusive_max)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
interval.inclusive_max(), " + ", max_offset, " is outside valid range ",
IndexInterval::FiniteRange()));
}
return IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
}
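// Implemented as a forward shift by the wrapped negation of each offset, so
// that std::numeric_limits<Index>::min() negates without undefined behavior.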
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index min_offset,
Index max_offset) {
return ShiftInterval(
interval, internal::wrap_on_overflow::Multiply(min_offset, Index(-1)),
internal::wrap_on_overflow::Multiply(max_offset, Index(-1)));
}
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index offset) {
return ShiftInterval(interval, offset, offset);
}
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index offset) {
return ShiftIntervalBackward(interval, offset, offset);
}
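// Shifts `interval` so that its lower bound equals `origin`; requires a
// finite origin and an interval that is bounded below.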
Result<IndexInterval> ShiftIntervalTo(IndexInterval interval, Index origin) {
if (!IsFiniteIndex(origin)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Origin ", origin, " is outside valid range ",
IndexInterval::FiniteRange()));
}
if (interval.inclusive_min() == -kInfIndex) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Interval ", interval, " is not bounded below"));
}
Index offset;
[[maybe_unused]] const bool overflow =
internal::SubOverflow(origin, interval.inclusive_min(), &offset);
assert(!overflow);
return ShiftInterval(interval, offset);
}
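// Returns OK if `index` lies within `interval`, otherwise an OutOfRange
// error.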
absl::Status CheckContains(IndexInterval interval, Index index) {
if (Contains(interval, index)) return absl::OkStatus();
return absl::OutOfRangeError(tensorstore::StrCat(
"Index ", index, " is outside valid range ", interval));
}
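// Computes the domain resulting from a strided slice of `orig`, along with
// the adjusted start index, validating that the slice stays within the
// original (possibly implicit) bounds.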
Result<std::pair<OptionallyImplicitIndexInterval, Index>> ExtractStridedSlice(
OptionallyImplicitIndexInterval orig, IntervalForm interval_form,
Index start, Index stop_or_size, Index stride) {
const IndexInterval constraint = IndexInterval::UncheckedClosed(
orig.implicit_lower() ? -kInfIndex : orig.inclusive_min(),
orig.implicit_upper() ? kInfIndex : orig.inclusive_max());
if (stride == 0 || stride == std::numeric_limits<Index>::min()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid stride ", stride));
}
if (start == kImplicit) {
start = stride > 0 ? orig.inclusive_min() : orig.inclusive_max();
} else {
if (!IsValidIndex(start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid start index ", start));
}
orig.implicit_lower() = false;
}
Index inclusive_stop;
if (interval_form == IntervalForm::sized) {
Index size = stop_or_size;
if (size == kImplicit) {
inclusive_stop = stride > 0 ? orig.inclusive_max() : orig.inclusive_min();
} else {
if (size < 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Negative size ", size, " specified for sized interval"));
}
orig.implicit_upper() = false;
if (size == 0) {
inclusive_stop = start + (stride > 0 ? -1 : 1);
} else {
if (internal::MulOverflow(stride, size - 1, &inclusive_stop) ||
internal::AddOverflow(start, inclusive_stop, &inclusive_stop)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Integer overflow computing slice result"));
}
}
}
} else {
if (stop_or_size == kImplicit) {
inclusive_stop = stride > 0 ? orig.inclusive_max() : orig.inclusive_min();
} else {
orig.implicit_upper() = false;
if (interval_form == IntervalForm::closed ||
!IsFiniteIndex(stop_or_size)) {
inclusive_stop = stop_or_size;
} else {
assert(interval_form == IntervalForm::half_open);
inclusive_stop = stop_or_size + (stride > 0 ? -1 : 1);
}
}
}
if (std::abs(stride) != 1 && !IsFiniteIndex(start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Slicing with non-unit stride of ", stride,
" requires a finite start index"));
}
Index adjusted_inclusive_min, adjusted_inclusive_max;
if (stride > 0) {
adjusted_inclusive_min = start;
adjusted_inclusive_max = inclusive_stop;
} else {
adjusted_inclusive_min = inclusive_stop;
adjusted_inclusive_max = start;
std::swap(orig.implicit_lower(), orig.implicit_upper());
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto adjusted_interval,
IndexInterval::Closed(adjusted_inclusive_min, adjusted_inclusive_max));
if (!Contains(constraint, adjusted_interval)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Slice interval ", adjusted_interval,
" is not contained within domain ", constraint));
}
Index new_start = start / stride;
Index new_size =
std::abs(inclusive_stop) == kInfIndex
? kInfIndex + 1 - new_start
: CeilOfRatio(adjusted_interval.size(), std::abs(stride));
orig.interval() = IndexInterval::UncheckedSized(new_start, new_size);
return {std::in_place, orig, start};
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractHalfOpenStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index stop, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::half_open, start, stop,
stride);
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractClosedStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index stop, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::closed, start, stop, stride);
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractSizedStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index size, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::sized, start, size, stride);
}
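// Combines ExtractStridedSlice with an optional translation of the origin,
// producing the new domain and the output offset of the resulting index map.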
absl::Status ComputeStridedSliceMap(OptionallyImplicitIndexInterval orig,
IntervalForm interval_form,
Index translate_origin_to, Index start,
Index stop_or_size, Index stride,
OptionallyImplicitIndexInterval* new_domain,
Index* output_offset) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval_and_adjusted_start,
ExtractStridedSlice(orig, interval_form, start, stop_or_size, stride));
OptionallyImplicitIndexInterval& new_interval =
new_interval_and_adjusted_start.first;
Index adjusted_start = new_interval_and_adjusted_start.second;
if (translate_origin_to != kImplicit) {
TENSORSTORE_ASSIGN_OR_RETURN(
new_interval.interval(),
ShiftIntervalTo(new_interval.interval(), translate_origin_to));
}
*new_domain = new_interval;
*output_offset = adjusted_start - new_interval.inclusive_min() * stride;
return absl::OkStatus();
}
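// Computes the preimage of `interval` under the affine map
// x -> x * divisor + offset, i.e. propagates the range backward through the
// transform, guarding against integer overflow.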
Result<IndexInterval> GetAffineTransformDomain(IndexInterval interval,
Index offset, Index divisor) {
assert(divisor != 0);
if (interval == IndexInterval()) {
return interval;
}
do {
Index result_lower, result_size;
Index lower, upper;
if (divisor < 0) {
if (divisor == std::numeric_limits<Index>::min() ||
offset == std::numeric_limits<Index>::min()) {
break;
}
divisor = -divisor;
offset = -offset;
lower = -interval.inclusive_max();
upper = -interval.inclusive_min();
if (interval.empty()) {
--lower;
--upper;
}
} else {
lower = interval.inclusive_min();
upper = interval.inclusive_max();
}
if (lower == -kInfIndex) {
result_lower = -kInfIndex;
} else {
if (internal::SubOverflow(lower, offset, &result_lower)) break;
result_lower = CeilOfRatio(result_lower, divisor);
if (!IsFiniteIndex(result_lower)) break;
}
if (interval.empty()) {
result_size = 0;
} else if (upper == kInfIndex) {
result_size = kInfIndex - result_lower + 1;
} else {
Index result_upper;
if (internal::SubOverflow(upper, offset, &result_upper)) break;
result_upper = FloorOfRatio(result_upper, divisor);
if (!IsFiniteIndex(result_upper)) break;
result_size = result_upper - result_lower + 1;
}
return IndexInterval::UncheckedSized(result_lower, result_size);
} while (false);
return absl::InvalidArgumentError(
tensorstore::StrCat("Integer overflow propagating range ", interval,
" through inverse affine transform with offset ",
offset, " and multiplier ", divisor));
}
Result<OptionallyImplicitIndexInterval> GetAffineTransformDomain(
OptionallyImplicitIndexInterval interval, Index offset, Index divisor) {
TENSORSTORE_ASSIGN_OR_RETURN(
interval.interval(),
GetAffineTransformDomain(interval.interval(), offset, divisor));
if (divisor < 0) {
std::swap(interval.implicit_lower(), interval.implicit_upper());
}
return interval;
}
namespace {
absl::Status GetAffineTransformError(IndexInterval interval, Index offset,
Index multiplier) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing affine transform of domain ", interval,
" with offset ", offset, " and multiplier ", multiplier));
}
}
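// Computes the image of `interval` under the affine map
// x -> x * multiplier + offset, guarding against integer overflow.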
Result<IndexInterval> GetAffineTransformRange(IndexInterval interval,
Index offset, Index multiplier) {
const auto transform_bound_overflow = [&](Index* bound) {
if (*bound == -kInfIndex || *bound == kInfIndex) {
if (multiplier < 0) *bound *= -1;
return false;
}
return (internal::MulOverflow(*bound, multiplier, bound) ||
internal::AddOverflow(*bound, offset, bound) ||
!IsFiniteIndex(*bound));
};
Index lower = interval.inclusive_min(), upper = interval.inclusive_max();
if (transform_bound_overflow(&lower) || transform_bound_overflow(&upper)) {
return GetAffineTransformError(interval, offset, multiplier);
}
if (interval.empty()) {
return IndexInterval::UncheckedSized(lower, 0);
}
if (multiplier == 0) {
return IndexInterval::UncheckedSized(lower, 1);
}
if (multiplier < 0) std::swap(lower, upper);
return IndexInterval::UncheckedClosed(lower, upper);
}
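// Like GetAffineTransformRange, but widens the finite bound in the direction
// of `divisor` by |divisor| - 1, so that the result covers every index that
// maps back into `interval`.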
Result<IndexInterval> GetAffineTransformInverseDomain(IndexInterval interval,
Index offset,
Index divisor) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval, GetAffineTransformRange(interval, offset, divisor));
if (new_interval.empty()) return new_interval;
if (divisor > 0 && new_interval.inclusive_max() != kInfIndex) {
Index new_inclusive_max;
if (internal::AddOverflow(new_interval.inclusive_max(), divisor - 1,
&new_inclusive_max) ||
!IsFiniteIndex(new_inclusive_max)) {
return GetAffineTransformError(interval, offset, divisor);
}
return IndexInterval::UncheckedClosed(new_interval.inclusive_min(),
new_inclusive_max);
}
if (divisor < 0 && new_interval.inclusive_min() != -kInfIndex) {
Index new_inclusive_min;
if (internal::AddOverflow(new_interval.inclusive_min(), divisor + 1,
&new_inclusive_min) ||
!IsFiniteIndex(new_inclusive_min)) {
return GetAffineTransformError(interval, offset, divisor);
}
return IndexInterval::UncheckedClosed(new_inclusive_min,
new_interval.inclusive_max());
}
return new_interval;
}
Result<OptionallyImplicitIndexInterval> GetAffineTransformRange(
OptionallyImplicitIndexInterval interval, Index offset, Index multiplier) {
TENSORSTORE_ASSIGN_OR_RETURN(
interval.interval(),
GetAffineTransformRange(interval.interval(), offset, multiplier));
if (multiplier < 0) {
std::swap(interval.implicit_lower(), interval.implicit_upper());
}
return interval;
}
Result<std::string_view> MergeDimensionLabels(std::string_view a,
std::string_view b) {
if (a.empty()) return b;
if (b.empty()) return a;
if (a == b) return a;
return absl::InvalidArgumentError("Dimension labels do not match");
}
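// Merges two optionally-implicit intervals: an implicit infinite bound defers
// to the other interval's bound, while remaining bounds must match exactly.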
Result<OptionallyImplicitIndexInterval> MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b) {
if (a == b) return a;
Index inclusive_min, inclusive_max;
bool implicit_lower, implicit_upper;
if (a.inclusive_min() == -kInfIndex && a.implicit_lower() == true) {
inclusive_min = b.inclusive_min();
implicit_lower = b.implicit_lower();
} else if (b.inclusive_min() == -kInfIndex && b.implicit_lower() == true) {
inclusive_min = a.inclusive_min();
implicit_lower = a.implicit_lower();
} else if (a.inclusive_min() != b.inclusive_min()) {
return absl::InvalidArgumentError("Lower bounds do not match");
} else {
inclusive_min = a.inclusive_min();
implicit_lower = a.implicit_lower() && b.implicit_lower();
}
if (a.inclusive_max() == kInfIndex && a.implicit_upper() == true) {
inclusive_max = b.inclusive_max();
implicit_upper = b.implicit_upper();
} else if (b.inclusive_max() == kInfIndex && b.implicit_upper() == true) {
inclusive_max = a.inclusive_max();
implicit_upper = a.implicit_upper();
} else if (a.inclusive_max() != b.inclusive_max()) {
return absl::InvalidArgumentError("Upper bounds do not match");
} else {
inclusive_max = a.inclusive_max();
implicit_upper = a.implicit_upper() && b.implicit_upper();
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::Closed(inclusive_min, inclusive_max));
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
namespace serialization {
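// An IndexInterval is serialized as the pair (inclusive_min, size).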
bool Serializer<IndexInterval>::Encode(EncodeSink& sink,
const IndexInterval& value) {
return serialization::EncodeTuple(sink, value.inclusive_min(), value.size());
}
bool Serializer<IndexInterval>::Decode(DecodeSource& source,
IndexInterval& value) {
Index inclusive_min, size;
if (!serialization::DecodeTuple(source, inclusive_min, size)) {
return false;
}
TENSORSTORE_ASSIGN_OR_RETURN(value, IndexInterval::Sized(inclusive_min, size),
(source.Fail(_), false));
return true;
}
}
} | #include "tensorstore/index_interval.h"
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::AreCompatibleOrUnbounded;
using ::tensorstore::ComputeStridedSliceMap;
using ::tensorstore::container;
using ::tensorstore::DividePositiveRoundOut;
using ::tensorstore::ExplicitIndexOr;
using ::tensorstore::ExtractClosedStridedSlice;
using ::tensorstore::ExtractHalfOpenStridedSlice;
using ::tensorstore::ExtractSizedStridedSlice;
using ::tensorstore::GetAffineTransformInverseDomain;
using ::tensorstore::ImplicitOrEqual;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainDimension;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexIntervalRef;
using ::tensorstore::Intersect;
using ::tensorstore::IntervalForm;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionLabels;
using ::tensorstore::MergeOptionallyImplicitIndexIntervals;
using ::tensorstore::OptionallyImplicitIndexInterval;
using ::tensorstore::ShiftInterval;
using ::tensorstore::ShiftIntervalBackward;
using ::tensorstore::ShiftIntervalTo;
using ::tensorstore::StrCat;
using ::tensorstore::view;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::Optional;
using ::testing::Pair;
TEST(IndexIntervalTest, DefaultConstruct) {
IndexInterval x;
EXPECT_EQ(-kInfIndex, x.inclusive_min());
EXPECT_EQ(-kInfIndex - 1, x.exclusive_min());
EXPECT_EQ(kInfIndex, x.inclusive_max());
EXPECT_EQ(kInfIndex + 1, x.exclusive_max());
EXPECT_EQ(kInfSize, x.size());
EXPECT_FALSE(x.empty());
}
TEST(IndexIntervalTest, Empty) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 0).empty());
}
TEST(IndexIntervalTest, ValidSized) {
EXPECT_TRUE(IndexInterval::ValidSized(0, 0));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, kInfSize));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 100));
EXPECT_TRUE(IndexInterval::ValidSized(kInfIndex - 5, 6));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 2));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidSized(5, -1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex - 5, 7));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 0));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 0));
}
TEST(IndexIntervalTest, ValidClosed) {
EXPECT_TRUE(IndexInterval::ValidClosed(0, 0));
EXPECT_TRUE(IndexInterval::ValidClosed(0, -1));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-5, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(0, -2));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidClosed(0, kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidClosed(+kInfIndex, +kInfIndex));
}
TEST(IndexIntervalTest, ValidHalfOpen) {
EXPECT_TRUE(IndexInterval::ValidHalfOpen(0, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, -1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-5, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex + 1));
}
TEST(IndexIntervalTest, Sized) {
EXPECT_EQ(IndexInterval::UncheckedSized(0, 5), IndexInterval::Sized(0, 5));
EXPECT_THAT(IndexInterval::Sized(0, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedSized) {
auto x = IndexInterval::UncheckedSized(1, 5);
EXPECT_EQ(1, x.inclusive_min());
EXPECT_EQ(0, x.exclusive_min());
EXPECT_EQ(5, x.size());
EXPECT_EQ(5, x.inclusive_max());
EXPECT_EQ(6, x.exclusive_max());
}
TEST(IndexIntervalTest, Equality) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 2) !=
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 3) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(2, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedClosed(2, 4));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedHalfOpen(2, 5));
}
TEST(IndexIntervalTest, UncheckedClosed) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 3),
IndexInterval::UncheckedClosed(2, 4));
}
TEST(IndexIntervalTest, Closed) {
EXPECT_EQ(IndexInterval::UncheckedClosed(2, 4), IndexInterval::Closed(2, 4));
EXPECT_THAT(IndexInterval::Closed(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedHalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
IndexInterval::UncheckedHalfOpen(2, 4));
}
TEST(IndexIntervalTest, HalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedHalfOpen(2, 4),
IndexInterval::HalfOpen(2, 4));
EXPECT_THAT(IndexInterval::HalfOpen(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, ContainsIndex) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 5));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 3));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 15));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 2));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15),
kMinFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), -kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, kInfIndex), 16));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kMaxFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
-kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
kInfIndex));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), 3));
}
TEST(IndexIntervalTest, ContainsInterval) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(4, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 14)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(6, 8)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedSized(20, 0)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(2, 10)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 16)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(5, 16)));
}
TEST(IndexIntervalTest, IsFinite) {
EXPECT_TRUE(IsFinite(IndexInterval::UncheckedClosed(3, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex)));
}
TEST(IndexIntervalTest, Intersect) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(-3, 5),
IndexInterval::UncheckedClosed(3, 10)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 5)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 11)));
EXPECT_EQ(IndexInterval::UncheckedSized(3, 0),
Intersect(IndexInterval::UncheckedClosed(-3, 0),
IndexInterval::UncheckedClosed(3, 5)));
}
TEST(IndexIntervalTest, IntersectOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
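  // Exhaustively checks all 16 combinations of the four implicit-bound flags.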
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), c, b}))
<< x;
}
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kMaxFiniteIndex),
false, true}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), true, true}));
}
TEST(IndexIntervalTest, IntersectPreferringExplicit) {
using OIII = OptionallyImplicitIndexInterval;
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), a, b}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), a, b}))
<< x;
}
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(3, 10), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, false},
OIII{IndexInterval::UncheckedClosed(3, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, true},
OIII{IndexInterval::UncheckedClosed(3, 10), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, true},
OIII{IndexInterval::UncheckedClosed(3, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false},
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true},
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false}));
}
TEST(IndexIntervalTest, Hull) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 15),
Hull(IndexInterval::UncheckedClosed(3, 5),
IndexInterval::UncheckedClosed(10, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(0, -1),
IndexInterval::UncheckedClosed(5, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(5, 15),
IndexInterval::UncheckedClosed(0, -1)));
EXPECT_EQ(IndexInterval::UncheckedClosed(0, -1),
Hull(IndexInterval::UncheckedClosed(5, 4),
IndexInterval::UncheckedClosed(0, -1)));
}
TEST(IndexIntervalTest, HullOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(1, 5), false, true},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(1, 6), false, true}));
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Hull(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-3, 10), a, d}))
<< x;
}
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
true, false}));
}
TEST(IndexIntervalTest, ContainsOrUnbounded) {
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_TRUE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(6, 9)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(4, 10)));
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, kInfIndex)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, 11)));
EXPECT_TRUE(ContainsOrUnbounded(
IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, AreCompatibleOrUnbounded) {
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(), IndexInterval()));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(),
IndexInterval::UncheckedSized(1, 4)));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval()));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(1, 5)));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(2, 3)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(-kInfIndex, 4)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(1, kInfIndex)));
}
TEST(IndexIntervalTest, Ostream) {
EXPECT_EQ("[1, 3)", StrCat(IndexInterval::UncheckedClosed(1, 2)));
EXPECT_EQ("(-inf, 3)", StrCat(IndexInterval::UncheckedClosed(-kInfIndex, 2)));
EXPECT_EQ("[7, +inf)", StrCat(IndexInterval::UncheckedClosed(7, kInfIndex)));
}
TEST(IndexIntervalTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
IndexInterval(),
IndexInterval::UncheckedSized(0, 1),
IndexInterval::UncheckedSized(0, 0),
IndexInterval::UncheckedSized(0, 2),
IndexInterval::UncheckedSized(1, 2),
}));
}
TEST(IndexIntervalTest, ShiftInterval) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, kInfIndex), 2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), -1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
Optional(IndexInterval::UncheckedClosed(-kInfIndex + 5,
-kInfIndex + 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-1, 10), kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalBackward) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval(), std::numeric_limits<Index>::min()),
Optional(IndexInterval()));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, 8), -2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, kInfIndex), -2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), 1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
Optional(
IndexInterval::UncheckedClosed(-kInfIndex + 5, -kInfIndex + 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 10),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalSeparateOffsets) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), -1, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(2, 1), -1,
std::numeric_limits<Index>::max()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(1, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalBackwardSeparateOffsets) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8),
-1, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(2, 1), 1,
std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(1, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(
ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalTo) {
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, 8), 3),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Interval .* is not bounded below"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, kInfIndex), 3),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101),
kMinFiniteIndex),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"10 \\+ [0-9]+ is outside valid range .*"));
}
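// The Extract*StridedSlice tests below exercise the common contract: each
// call returns a Pair of the new OptionallyImplicitIndexInterval (in the
// strided, possibly reflected coordinate space) and the adjusted start index.
// A kImplicit bound defaults to the corresponding domain bound (the upper
// bound when the stride is negative), and a negative stride reflects the
// interval and swaps which bound is implicit.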
TEST(ExtractStridedSliceTest, Closed) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 6, 9, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(6, 4), false, false}, 6));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, true}, 3, 15, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(3, 15), false, false}, 3));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), false, true}, 10));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), true, false}, 10));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 6, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-4, 2), false, false}, 9));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
kImplicit, 9, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(5, 5), false, false}, 5));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
-kInfIndex, 9, 1),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\(-inf, 10\\) is not contained "
"within domain \\[5, 11\\)"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, kImplicit, 6,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-5, 3), false, false}, 10));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
-kInfIndex, -2),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\(-inf, 10\\) is not contained "
"within domain \\[5, 11\\)"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, kImplicit,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-4, 3), false, false}, 9));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7,
kImplicit, 2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(3, 2), false, false}, 7));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
kImplicit, 10, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-kInfIndex, 10), false, false},
-kInfIndex));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
5, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(5, kInfIndex), false, false},
5));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
kImplicit, 5, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-kInfIndex, -5), false, false},
kInfIndex));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, kImplicit, 6,
0),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid stride 0"));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
kImplicit, 6, std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid stride -[0-9]+"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 4, 6, 1),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\[4, 7\\) is not contained within domain "
"\\[5, 11\\)"));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(3, 10), false, false},
-kInfIndex - 1, 10, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index -[0-9]+"));
}
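// In the sized form the third positional argument is an element count rather
// than a stop bound: a count of 0 produces an empty interval and a negative
// count is rejected.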
TEST(ExtractStridedSliceTest, Sized) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 3, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -2), false, false}, 9));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 7, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(7, 10), false, true}, 7));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, true},
kImplicit, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(5, 10), true, true}, 5));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), false, true}, 10));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), true, false}, 10));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, kImplicit,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -2), false, false}, 9));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, kImplicit,
2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(3, 4), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, 0, 2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(3, 0), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, 0, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-3, 0), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, -1, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Negative size -1 specified for sized interval"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
std::numeric_limits<Index>::min() + 1, 0, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index -[0-9]+"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
std::numeric_limits<Index>::max(), 0, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index [0-9]+"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 5, 100,
kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Integer overflow computing slice result"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 5,
kInfIndex, 2),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Integer overflow computing slice result"));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, 10), false, false},
kImplicit, kImplicit, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Slicing with non-unit stride of 2 requires a "
"finite start index"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(3, kInfIndex), false, false},
kImplicit, kImplicit, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Slicing with non-unit stride of -2 requires a "
"finite start index"));
}
TEST(ExtractStridedSliceTest, HalfOpen) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 7, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -4), false, false}, 9));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit, 8, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(5, 8), true, false}, 5));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 6, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(6, 11), false, true}, 6));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, 3, 8, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(3, 8), false, false}, 3));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 6, 15, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(6, 15), false, false}, 6));
EXPECT_THAT(ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
std::numeric_limits<Index>::min() + 1, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid closed index interval"));
EXPECT_THAT(ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
std::numeric_limits<Index>::max(), -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid closed index interval"));
}
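// Judging from the call sites below (parameter names inferred from usage, not
// quoted from the header), the positional arguments to ComputeStridedSliceMap
// are: the original interval, the interval form, a translate-origin-to index
// (kImplicit meaning no translation), the start and stop/size values, and the
// stride, followed by the two output pointers.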
TEST(ComputeStridedSliceMapTest, NoTranslationStride1) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
2,
8,
1, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(2, 8), false, false}),
new_domain);
EXPECT_EQ(0, output_offset);
}
TEST(ComputeStridedSliceMapTest, NoTranslationStride2) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
2,
8,
2, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 4), false, false}),
new_domain);
EXPECT_EQ(0, output_offset);
}
TEST(ComputeStridedSliceMapTest, NoTranslationStrideNegative2) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
9,
2,
-2, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(-4, 0), false, false}),
new_domain);
EXPECT_EQ(1, output_offset);
}
TEST(ComputeStridedSliceMapTest, TranslationStride1) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
7,
2,
8,
1, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(7, 13), false, false}),
new_domain);
EXPECT_EQ(-5, output_offset);
}
TEST(ComputeStridedSliceMapTest, TranslationError) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_THAT(ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kMaxFiniteIndex,
2,
8,
1, &new_domain, &output_offset),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComputeStridedSliceMapTest, SliceError) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_THAT(ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(3, 10), false, false},
IntervalForm::half_open,
kMaxFiniteIndex,
2,
8,
1, &new_domain, &output_offset),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
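// GetAffineTransformDomain(interval, offset, divisor) computes the preimage
// {x : offset + divisor * x is in interval}. For example, in Divisor2 below:
// 6 + 2x in [1, 9] requires 2x in [-5, 3], so the integer solutions are
// x in [-2, 1].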
TEST(GetAffineTransformDomainTest, Divisor1) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-9, -1),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
10, 1)
.value());
}
TEST(GetAffineTransformDomainTest, Divisor2) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-2, 1),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, 2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative1) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-3, 5),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, -1)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative2) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-1, 2),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, -2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative2LargeMagnitude) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-(kInfIndex - 10) / 2, 5),
GetAffineTransformDomain(
IndexInterval::UncheckedClosed(-10, kInfIndex - 10),
0, -2)
.value());
}
TEST(GetAffineTransformDomainTest, EmptyInterval) {
EXPECT_EQ(IndexInterval::UncheckedSized(-2, 0),
GetAffineTransformDomain(IndexInterval::UncheckedSized(10, 0),
5, -2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorInvalid) {
EXPECT_THAT(GetAffineTransformDomain(
IndexInterval::UncheckedClosed(1, 10),
0, std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformDomainTest, OffsetInvalid) {
EXPECT_THAT(GetAffineTransformDomain(
IndexInterval::UncheckedClosed(1, 10),
std::numeric_limits<Index>::min(), -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
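// Round-trip property exercised by the helper below: GetAffineTransformRange
// maps a domain forward (range = offset + multiplier * domain), and
// GetAffineTransformDomain recovers the original domain from that range.
// GetAffineTransformInverseDomain may return a wider interval (see the
// Examples test further down), but its preimage is still the original domain.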
void TestGetAffineTransformRangeRoundTrip(IndexInterval domain, Index offset,
Index multiplier,
IndexInterval range) {
EXPECT_THAT(GetAffineTransformRange(domain, offset, multiplier),
::testing::Optional(range))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
EXPECT_THAT(GetAffineTransformDomain(range, offset, multiplier),
::testing::Optional(domain))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto inv_domain,
GetAffineTransformInverseDomain(domain, offset, multiplier));
EXPECT_THAT(GetAffineTransformDomain(inv_domain, offset, multiplier),
::testing::Optional(domain))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range
<< ", inv_domain=" << inv_domain;
}
TEST(GetAffineTransformRangeTest, RoundTrip) {
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedClosed(1, 10), 3,
1,
IndexInterval::UncheckedClosed(4, 13));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedClosed(1, 10), 3,
2,
IndexInterval::UncheckedClosed(2 + 3, 10 * 2 + 3));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedSized(4, 0), 3,
2, IndexInterval::UncheckedSized(2 * 4 + 3, 0));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedSized(4, 0), 3,
-2,
IndexInterval::UncheckedSized(-2 * 4 + 3, 0));
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::min(),
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::max(),
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), 0,
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::min(),
-1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::max(),
-1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), 0,
-1,
IndexInterval());
}
TEST(GetAffineTransformRangeTest, ZeroMultiplier) {
EXPECT_EQ(IndexInterval::UncheckedSized(3, 1),
GetAffineTransformRange(IndexInterval::UncheckedClosed(4, 10), 3, 0)
.value());
}
TEST(GetAffineTransformRangeTest, ErrorCases) {
EXPECT_THAT(GetAffineTransformRange(IndexInterval::UncheckedClosed(3, 10),
kInfIndex, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(GetAffineTransformRange(IndexInterval::UncheckedClosed(3, 10), 5,
kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(GetAffineTransformRange(
IndexInterval::UncheckedClosed(-1, 1),
std::numeric_limits<Index>::max() - kInfIndex + 1, kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformInverseDomainTest, Examples) {
EXPECT_THAT(
GetAffineTransformRange(IndexInterval::UncheckedClosed(2, 4), 1, 3),
::testing::Optional(IndexInterval::UncheckedClosed(7, 13)));
EXPECT_THAT(GetAffineTransformInverseDomain(
IndexInterval::UncheckedClosed(2, 4), 1, 3),
::testing::Optional(IndexInterval::UncheckedClosed(7, 15)));
EXPECT_THAT(
GetAffineTransformRange(IndexInterval::UncheckedClosed(2, 4), 1, -3),
::testing::Optional(IndexInterval::UncheckedClosed(-11, -5)));
EXPECT_THAT(GetAffineTransformInverseDomain(
IndexInterval::UncheckedClosed(2, 4), 1, -3),
::testing::Optional(IndexInterval::UncheckedClosed(-13, -5)));
}
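// For the optionally-implicit overloads, a negative multiplier reverses the
// bounds, so the implicit_lower/implicit_upper flags swap along with them, as
// the cases below demonstrate.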
void TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
OptionallyImplicitIndexInterval domain, Index offset, Index multiplier,
OptionallyImplicitIndexInterval range) {
EXPECT_EQ(GetAffineTransformRange(domain, offset, multiplier).value(), range)
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
EXPECT_EQ(GetAffineTransformDomain(range, offset, multiplier).value(), domain)
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
}
TEST(GetAffineTransformRangeTest, OptionallyImplicitRoundTrip) {
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, false},
3,
1, {IndexInterval::UncheckedClosed(4, 13), true, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, true},
3,
1, {IndexInterval::UncheckedClosed(4, 13), true, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, false},
3,
1, {IndexInterval::UncheckedClosed(4, 13), false, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
3,
1, {IndexInterval::UncheckedClosed(4, 13), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
-3,
1, {IndexInterval::UncheckedClosed(-2, 7), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
3,
-1, {IndexInterval::UncheckedClosed(-7, 2), true, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, false},
3,
-1, {IndexInterval::UncheckedClosed(-7, 2), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedSized(4, 0), true, false},
3,
-2,
{IndexInterval::UncheckedSized(-2 * 4 + 3, 0), false, true});
}
TEST(GetAffineTransformRangeTest, OptionallyImplicitErrorCases) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(GetAffineTransformRange(
OIII{IndexInterval::UncheckedClosed(3, 10), true, false},
kInfIndex, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformDomainTest, OptionallyImplicitErrorCases) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(GetAffineTransformDomain(
OIII{IndexInterval::UncheckedClosed(1, 10), true, false},
std::numeric_limits<Index>::min(), -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
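// IndexIntervalRef aliases externally owned inclusive_min/size storage:
// assigning an IndexInterval through the ref writes back to those variables,
// and mutating the variables is visible through the ref.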
TEST(IndexIntervalRefTest, Basic) {
Index inclusive_min = 5, size = 10;
IndexIntervalRef ref = IndexIntervalRef::UncheckedSized(inclusive_min, size);
EXPECT_EQ(5, ref.inclusive_min());
EXPECT_EQ(4, ref.exclusive_min());
EXPECT_EQ(10, ref.size());
EXPECT_EQ(15, ref.exclusive_max());
EXPECT_EQ(14, ref.inclusive_max());
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10),
static_cast<IndexInterval>(ref));
ref = IndexInterval::UncheckedSized(6, 9);
EXPECT_EQ(6, inclusive_min);
EXPECT_EQ(9, size);
EXPECT_FALSE(ref.empty());
size = 0;
EXPECT_TRUE(ref.empty());
}
TEST(IndexIntervalRefTest, ConstructFromIndexInterval) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
ref = IndexInterval::UncheckedSized(3, 6);
EXPECT_EQ(interval, IndexInterval::UncheckedSized(3, 6));
}
TEST(IndexIntervalRefTest, ImplicitConversion) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
IndexInterval interval2 = ref;
EXPECT_EQ(interval, interval2);
EXPECT_TRUE(IsFinite(ref));
EXPECT_TRUE(Contains(ref, ref.inclusive_min()));
EXPECT_TRUE(Contains(ref, ref));
EXPECT_EQ(ref, Intersect(ref, ref));
EXPECT_EQ(ref, Hull(ref, ref));
}
TEST(IndexIntervalRefTest, Assignment) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
IndexInterval interval2 = ref;
IndexIntervalRef ref2(interval2);
ref2 = ref;
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10), interval2);
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10), interval);
}
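// effective_interval() widens each implicit bound to the corresponding
// infinite bound while leaving explicit bounds unchanged.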
TEST(OptionallyImplicitIndexIntervalTest, EffectiveInterval) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-kInfIndex, 2),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), true, false)
.effective_interval());
EXPECT_EQ(IndexInterval::UncheckedClosed(1, +kInfIndex),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), false, true)
.effective_interval());
EXPECT_EQ(IndexInterval(),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), true, true)
.effective_interval());
}
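// In the printed representation, a trailing '*' marks an implicit bound.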
TEST(OptionallyImplicitIndexIntervalTest, Ostream) {
EXPECT_EQ("[1*, 3)", StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 2), true, false}));
EXPECT_EQ("(-inf, 3*)",
StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 2), false, true}));
EXPECT_EQ("[7*, +inf*)",
StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(7, kInfIndex), true, true}));
}
TEST(OptionallyImplicitIndexIntervalTest, Comparison) {
OptionallyImplicitIndexInterval a{};
OptionallyImplicitIndexInterval b{IndexInterval::UncheckedSized(0, 1), false,
false};
OptionallyImplicitIndexInterval c{IndexInterval::UncheckedSized(0, 1), false,
true};
OptionallyImplicitIndexInterval d{IndexInterval::UncheckedSized(0, 1), true,
false};
OptionallyImplicitIndexInterval e{IndexInterval::UncheckedSized(0, 1), true,
true};
OptionallyImplicitIndexInterval f{IndexInterval::UncheckedSized(0, 0), false,
false};
OptionallyImplicitIndexInterval g{IndexInterval::UncheckedSized(0, 2), false,
false};
OptionallyImplicitIndexInterval h{IndexInterval::UncheckedSized(1, 2), false,
false};
OptionallyImplicitIndexInterval i{IndexInterval::UncheckedSized(1, 2), false,
true};
OptionallyImplicitIndexInterval j{IndexInterval::UncheckedSized(1, 2), true,
false};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_EQ(e, e);
EXPECT_EQ(f, f);
EXPECT_EQ(g, g);
EXPECT_EQ(h, h);
EXPECT_EQ(i, i);
EXPECT_EQ(j, j);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(a, g);
EXPECT_NE(a, h);
EXPECT_NE(a, i);
EXPECT_NE(a, j);
EXPECT_NE(g, h);
EXPECT_NE(g, i);
EXPECT_NE(g, j);
}
TEST(OptionallyImplicitIndexIntervalTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
OptionallyImplicitIndexInterval{},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1),
false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1), true,
false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1), true,
true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 0),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 2),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2),
false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2), true,
false},
}));
}
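// IndexDomainDimension pairs an optionally-implicit interval with a dimension
// label; the container/view template parameter controls whether the label is
// owned or referenced. The asserts below verify that the two flavors are
// mutually convertible and assignable.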
static_assert(std::is_convertible_v<IndexDomainDimension<container>,
IndexDomainDimension<view>>);
static_assert(std::is_convertible_v<IndexDomainDimension<view>,
IndexDomainDimension<container>>);
static_assert(std::is_assignable_v<IndexDomainDimension<container>,
IndexDomainDimension<view>>);
static_assert(std::is_assignable_v<IndexDomainDimension<view>,
IndexDomainDimension<container>>);
TEST(IndexDomainDimensionTest, DefaultConstruct) {
IndexDomainDimension<> d;
EXPECT_EQ(OptionallyImplicitIndexInterval(),
d.optionally_implicit_interval());
EXPECT_EQ("", d.label());
}
TEST(IndexDomainDimensionTest, ConstructFromOptionallyImplicitIndexInterval) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = interval;
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("", d.label());
}
TEST(IndexDomainDimensionTest, ConstructLabel) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, ConstructContainerFromView) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<view> d_view = {interval, "label"};
IndexDomainDimension<> d(d_view);
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, ConstructViewFromContainer) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
IndexDomainDimension<view> d_view = d;
EXPECT_EQ(interval, d_view.optionally_implicit_interval());
EXPECT_EQ("label", d_view.label());
}
TEST(IndexDomainDimensionTest, AssignContainerFromView) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<view> d_view = {interval, "label"};
IndexDomainDimension<> d;
d = d_view;
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, AssignViewFromContainer) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
IndexDomainDimension<view> d_view;
d_view = d;
EXPECT_EQ(interval, d_view.optionally_implicit_interval());
EXPECT_EQ("label", d_view.label());
}
TEST(IndexDomainDimensionTest, PrintToOstream) {
EXPECT_EQ("[0, 10*)",
StrCat(IndexDomainDimension<>{
{IndexInterval::UncheckedSized(0, 10), false, true}, ""}));
EXPECT_EQ("[0, 10*)",
StrCat(IndexDomainDimension<view>{
{IndexInterval::UncheckedSized(0, 10), false, true}, ""}));
EXPECT_EQ("\"label\": [0, 10*)",
StrCat(IndexDomainDimension<>{
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"}));
}
TEST(IndexDomainDimensionTest, Compare) {
IndexDomainDimension<> d1 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<view> d1_view = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<> d2 = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<view> d2_view = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<> d3 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label2"};
EXPECT_EQ(d1, d1);
EXPECT_EQ(d1, d1_view);
EXPECT_EQ(d1_view, d1);
EXPECT_EQ(d1_view, d1_view);
EXPECT_EQ(d2, d2);
EXPECT_EQ(d3, d3);
EXPECT_NE(d1, d2);
EXPECT_NE(d1, d2_view);
EXPECT_NE(d1_view, d2);
EXPECT_NE(d1_view, d2_view);
EXPECT_NE(d1, d3);
}
TEST(IndexDomainDimensionTest, Hash) {
IndexDomainDimension<> d1 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<view> d1_view = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<> d2 = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<view> d2_view = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<> d3 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label2"};
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({d1, d2, d3}));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({d1_view, d2_view}));
}
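// kImplicit acts as a wildcard in the helpers checked below: ExplicitIndexOr
// returns its first argument unless that argument is kImplicit, and
// ImplicitOrEqual treats kImplicit as matching any index.
// DividePositiveRoundOut divides both bounds by a positive divisor, rounding
// outward (the lower bound toward -inf, the upper bound toward +inf).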
static_assert(ExplicitIndexOr(10, 11) == 10);
static_assert(ExplicitIndexOr(kImplicit, 11) == 11);
static_assert(ImplicitOrEqual(10, 10));
static_assert(ImplicitOrEqual(kImplicit, 10));
static_assert(!ImplicitOrEqual(10, 11));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(3, 10),
2) ==
IndexInterval::UncheckedHalfOpen(1, 5));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(3, 11),
2) ==
IndexInterval::UncheckedHalfOpen(1, 6));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(-3, 10),
2) ==
IndexInterval::UncheckedHalfOpen(-2, 5));
TEST(IndexIntervalTest, Negate) {
EXPECT_EQ(IndexInterval::UncheckedSized(0, 0),
-IndexInterval::UncheckedSized(0, 0));
EXPECT_EQ(IndexInterval::UncheckedSized(5, 0),
-IndexInterval::UncheckedSized(-5, 0));
EXPECT_EQ(
IndexInterval::UncheckedClosed(kMaxFiniteIndex, kMaxFiniteIndex),
-IndexInterval::UncheckedClosed(-kMaxFiniteIndex, -kMaxFiniteIndex));
EXPECT_EQ(IndexInterval(), -IndexInterval());
EXPECT_EQ(IndexInterval::UncheckedClosed(-5, 6),
-IndexInterval::UncheckedClosed(-6, 5));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 30),
-IndexInterval::UncheckedClosed(-30, -5));
}
TEST(MergeDimensionLabelsTest, Basic) {
EXPECT_THAT(MergeDimensionLabels("a", ""),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("a", "a"),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("", "a"),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("", ""),
::testing::Optional(std::string("")));
EXPECT_THAT(MergeDimensionLabels("a", "b"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension labels do not match"));
}
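// Merge semantics, as exercised below: bounds that agree merge (explicit wins
// over implicit), an implicit infinite bound defers to an explicit finite one,
// and bounds that disagree are an error even when one of them is implicit.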
TEST(MergeOptionallyImplicitIndexIntervalsTest, EqualExplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, EqualImplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}));
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, UpperUnspecified) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, true}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, LowerUnspecified) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), true, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchLower) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(2, 5), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Lower bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchLowerInfinite) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Lower bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, LowerImplicitMerge) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, UpperImplicitMerge) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpper) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 6), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpperInfinite) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpperImplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 6), false, true}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, InvalidInterval) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, -5), true, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(5, kInfIndex), false, true}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"\\(5, -5\\) do not specify a valid closed index interval"));
}
TEST(IndexIntervalSerializationTest, Basic) {
TestSerializationRoundTrip(IndexInterval::UncheckedSized(1, 2));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_interval.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_interval_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b2f293ea-f020-4c91-af4f-d136b4414cb6 | cpp | google/tensorstore | rank | tensorstore/rank.cc | tensorstore/rank_test.cc | #include "tensorstore/rank.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::string StaticCastTraits<DimensionIndex>::Describe(DimensionIndex value) {
if (value == dynamic_rank) return "dynamic rank";
return tensorstore::StrCat("rank of ", value);
}
absl::Status ValidateRank(DimensionIndex rank) {
if (!IsValidRank(rank)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank ", rank, " is outside valid range [0, ", kMaxRank, "]"));
}
return absl::OkStatus();
}
} | #include "tensorstore/rank.h"
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::InlineRankLimit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::RankConstraint;
using ::tensorstore::StaticRankCast;
using ::tensorstore::unchecked;
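// RankConstraint treats dynamic_rank as "unspecified": it is implied by any
// rank but implies only itself, satisfies every *OrUnspecified comparison,
// and propagates through Add/Subtract, while And picks the unique specified
// value among its arguments.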
static_assert(RankConstraint::Implies(3, 3));
static_assert(RankConstraint::Implies(3, dynamic_rank));
static_assert(RankConstraint::Implies(dynamic_rank, dynamic_rank));
static_assert(!RankConstraint::Implies(3, 2));
static_assert(!RankConstraint::Implies(dynamic_rank, 3));
static_assert(RankConstraint::EqualOrUnspecified(3, 3));
static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, dynamic_rank));
static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, 3));
static_assert(RankConstraint::EqualOrUnspecified(3, dynamic_rank));
static_assert(!RankConstraint::EqualOrUnspecified(3, 2));
static_assert(RankConstraint::Add(2, 3) == 5);
static_assert(RankConstraint::Add({2, 3, 4}) == 9);
static_assert(RankConstraint::Add({2}) == 2);
static_assert(RankConstraint::Add({}) == 0);
static_assert(RankConstraint::Add(dynamic_rank, 3) == dynamic_rank);
static_assert(RankConstraint::Add(3, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Add(dynamic_rank, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Subtract(5, 2) == 3);
static_assert(RankConstraint::Subtract(dynamic_rank, 3) == dynamic_rank);
static_assert(RankConstraint::Subtract(3, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Subtract(dynamic_rank, dynamic_rank) ==
dynamic_rank);
static_assert(RankConstraint::And(dynamic_rank, 5) == 5);
static_assert(RankConstraint::And(5, dynamic_rank) == 5);
static_assert(RankConstraint::And(dynamic_rank, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::And({5, 5, dynamic_rank}) == 5);
static_assert(RankConstraint::And({3}) == 3);
static_assert(RankConstraint::And({}) == dynamic_rank);
static_assert(RankConstraint::LessOrUnspecified(1, 2) == true);
static_assert(RankConstraint::LessOrUnspecified(1, 1) == false);
static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::LessOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, dynamic_rank) ==
true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 2) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 1) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 0) == false);
static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank,
dynamic_rank) == true);
static_assert(RankConstraint::GreaterOrUnspecified(2, 1) == true);
static_assert(RankConstraint::GreaterOrUnspecified(1, 1) == false);
static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::GreaterOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank,
dynamic_rank) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(2, 1) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(1, 1) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(0, 1) == false);
static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank, 2) ==
true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(1, dynamic_rank) ==
true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank,
dynamic_rank) == true);
TEST(RankCastTest, Basic) {
auto x =
StaticRankCast<3>(std::integral_constant<DimensionIndex, 3>()).value();
static_assert(
std::is_same_v<decltype(x), std::integral_constant<DimensionIndex, 3>>);
auto y = StaticRankCast<dynamic_rank>(x).value();
EXPECT_EQ(3, y);
static_assert(std::is_same_v<decltype(y), DimensionIndex>);
auto a = StaticRankCast<3>(DimensionIndex(3)).value();
auto b = StaticRankCast<dynamic_rank>(DimensionIndex(3)).value();
static_assert(
std::is_same_v<decltype(a), std::integral_constant<DimensionIndex, 3>>);
static_assert(std::is_same_v<decltype(b), DimensionIndex>);
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(2))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast rank of 2 to rank of 3"));
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(3))),
::testing::Optional(tensorstore::StaticRank<3>()));
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(dynamic_rank))),
::testing::Optional(tensorstore::StaticRank<3>()));
}
TEST(RankCastDeathTest, DynamicToStatic) {
EXPECT_DEBUG_DEATH((StaticRankCast<3, unchecked>(DimensionIndex(1))),
"StaticCast is not valid");
}
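// A sketch of the inline-rank encoding, as the asserts below exercise it:
// dynamic_rank(n) requests a run-time rank with inline storage for up to n
// dimensions. InlineRankLimit recovers n, while RankConstraint::FromInlineRank
// collapses every dynamic encoding to -1 and passes static ranks through.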
static_assert(InlineRankLimit(dynamic_rank(0)) == 0);
static_assert(InlineRankLimit(dynamic_rank(1)) == 1);
static_assert(InlineRankLimit(dynamic_rank(2)) == 2);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(0)) == -1);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(1)) == -1);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(2)) == -1);
static_assert(RankConstraint::FromInlineRank(0) == 0);
static_assert(RankConstraint::FromInlineRank(1) == 1);
static_assert(RankConstraint::FromInlineRank(2) == 2);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/rank.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/rank_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
82d02e5a-7dfd-45a3-8fb6-a86cc367b6a7 | cpp | google/tensorstore | contiguous_layout | tensorstore/contiguous_layout.cc | tensorstore/contiguous_layout_test.cc | #include "tensorstore/contiguous_layout.h"
#include <stddef.h>
#include <cassert>
#include <ostream>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
tensorstore::span<const Index> shape,
tensorstore::span<Index> strides) {
const DimensionIndex rank = shape.size();
assert(strides.size() == rank);
if (order == ContiguousLayoutOrder::right) {
for (DimensionIndex i = rank - 1; i >= 0; --i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
} else {
for (DimensionIndex i = 0; i < rank; ++i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
}
}
std::ostream& operator<<(std::ostream& os, ContiguousLayoutOrder order) {
return os << (order == ContiguousLayoutOrder::c ? 'C' : 'F');
}
} | #include "tensorstore/contiguous_layout.h"
#include <array>
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::ComputeStrides;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::GetContiguousIndices;
using ::tensorstore::GetContiguousOffset;
using ::tensorstore::Index;
TEST(ContiguousLayoutOrderTest, PrintToOstream) {
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::c;
EXPECT_EQ("C", ostr.str());
}
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::fortran;
EXPECT_EQ("F", ostr.str());
}
}
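// Worked example for C (row-major) order: with shape {3, 4, 5} and an element
// stride of 1, the strides are {4*5, 5, 1} = {20, 5, 1}; scaling the element
// stride to 2 doubles every entry. Fortran (column-major) order instead gives
// {1, 3, 3*4} = {1, 3, 12}.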
TEST(ComputeStridesTest, COrder) {
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 1,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(20, 5, 1));
}
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 2,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(40, 10, 2));
}
}
TEST(ComputeStridesTest, FOrder) {
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::fortran, 1,
tensorstore::span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(1, 3, 12));
}
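// The offset of an index vector is the dot product of the indices with the
// strides implied by the shape: e.g. in C order with shape {7, 11}, indices
// {3, 4} map to 3*11 + 4, and GetContiguousIndices inverts that mapping.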
TEST(GetContiguousOffsetTest, Basic) {
Index indices[2];
EXPECT_EQ(3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::c, Index>(3 * 11 + 4, {{7, 11}},
indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(3 + 4 * 7, GetContiguousOffset<ContiguousLayoutOrder::fortran>(
{{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::fortran, Index>(
3 + 4 * 7, {{7, 11}}, indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(
2 * (7 * 11) + 3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{5, 7, 11}}, {{2, 3, 4}}));
EXPECT_EQ(2 + 5 * 3 + (5 * 7) * 4,
GetContiguousOffset<ContiguousLayoutOrder::fortran>({{5, 7, 11}},
{{2, 3, 4}}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::c>({}, {}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::fortran>({}, {}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/contiguous_layout.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/contiguous_layout_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
65904162-ffaa-4bb0-b916-1b9789122f7b | cpp | google/tensorstore | array_testutil | tensorstore/array_testutil.cc | tensorstore/array_testutil_test.cc | #include "tensorstore/array_testutil.h"
#include <ostream>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_array {
class ArrayMatcherImpl
: public ::testing::MatcherInterface<OffsetArrayView<const void>> {
public:
ArrayMatcherImpl(SharedOffsetArray<const void> expected,
EqualityComparisonKind comparison_kind)
: expected_(std::move(expected)), comparison_kind_(comparison_kind) {}
bool MatchAndExplain(
OffsetArrayView<const void> value,
::testing::MatchResultListener* listener) const override {
const bool listener_interested = listener->IsInterested();
if (value.dtype() != expected_.dtype()) {
if (listener_interested) {
*listener << "which has a data type of " << value.dtype();
}
return false;
}
if (expected_.domain() != value.domain()) {
if (listener_interested) {
*listener << "which has a domain of " << value.domain();
}
return false;
}
if (AreArraysEqual(expected_, value, comparison_kind_)) {
return true;
}
if (!listener_interested) return false;
bool reason_printed = false;
IterateOverIndexRange(
value.domain(), [&](tensorstore::span<const Index> indices) {
if (!AreArraysEqual(value[indices], expected_[indices],
comparison_kind_)) {
if (reason_printed) {
*listener << ", ";
}
*listener << "whose element at " << indices
<< " doesn't match, expected=" << expected_[indices]
<< ", actual=" << value[indices];
reason_printed = true;
}
});
return false;
}
void DescribeTo(std::ostream* os) const override {
*os << "has a data type of " << expected_.dtype() << " and a domain of "
<< expected_.domain() << " and is "
<< (comparison_kind_ == EqualityComparisonKind::equal ? "equal"
: "identical")
<< " to " << expected_;
}
private:
SharedOffsetArray<const void> expected_;
EqualityComparisonKind comparison_kind_;
};
}
ArrayMatcher MatchesArray(SharedOffsetArray<const void> expected,
EqualityComparisonKind comparison_kind) {
return ::testing::MakeMatcher(new internal_array::ArrayMatcherImpl(
std::move(expected), comparison_kind));
}
} | #include "tensorstore/array_testutil.h"
#include <cstdint>
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesArray;
using ::tensorstore::MatchesScalarArray;
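// MatchesArray checks the data type first, then the domain, and only then
// compares element-wise; the Describe*/Explain* tests below pin down the
// listener output produced at each stage, including the overloads that take
// per-element GMock matchers.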
TEST(MatchesArrayTest, Describe) {
std::ostringstream ss;
MatchesArray<std::int32_t>({1, 2}).DescribeTo(&ss);
EXPECT_EQ(
R"(has a data type of int32 and a domain of {origin={0}, shape={2}} where
element at {0} is equal to 1,
element at {1} is equal to 2)",
ss.str());
}
TEST(MatchesArrayTest, DescribeNegation) {
std::ostringstream ss;
MatchesArray<std::int32_t>({1, 2}).DescribeNegationTo(&ss);
EXPECT_EQ(R"(doesn't have a data type of int32, or
doesn't have a domain of {origin={0}, shape={2}}, or
element at {0} isn't equal to 1, or
element at {1} isn't equal to 2)",
ss.str());
}
TEST(MatchesArrayTest, ExplainDataTypeMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<std::int32_t>({1, 2, 3}),
MakeArray<float>({1, 2}), &listener);
EXPECT_EQ("which has a data type of float32", listener.str());
}
TEST(MatchesArrayTest, ExplainDomainMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<int>({1, 2, 3}),
MakeArray<int>({1, 2}), &listener);
EXPECT_EQ("", listener.str());
}
TEST(MatchesArrayTest, ExplainElementMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<int>({1, 2}),
MakeArray<int>({1, 3}), &listener);
EXPECT_EQ("whose element at {1} doesn't match", listener.str());
}
TEST(MatchesArrayTest, ExplainElementMatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
MatchesArray<std::string>(
{::testing::Not(::testing::ElementsAre('d')),
::testing::Not(::testing::ElementsAre('a', 'b'))}),
MakeArray<std::string>({"x", "ac"}), &listener);
EXPECT_EQ(
"whose element at {0} matches, whose element #0 doesn't match,\n"
"and whose element at {1} matches, whose element #1 doesn't match",
listener.str());
}
TEST(MatchesArrayTest, ExplainElementMismatchExplanation) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
MatchesScalarArray<std::string>(::testing::ElementsAre('a', 'b')),
MakeScalarArray<std::string>("ac"), &listener);
EXPECT_EQ("whose element at {} doesn't match, whose element #1 doesn't match",
listener.str());
}
TEST(MatchesArrayTest, Matches) {
EXPECT_THAT(MakeScalarArray<int>(1), MatchesScalarArray<int>(1));
EXPECT_THAT(MakeArray<int>({1, 2}), MatchesArray<int>({1, 2}));
EXPECT_THAT(MakeArray<int>({{1, 2}}), MatchesArray<int>({{1, 2}}));
EXPECT_THAT(MakeArray<int>({{{1, 2}}}), MatchesArray<int>({{{1, 2}}}));
EXPECT_THAT(MakeArray<int>({{{{1, 2}}}}), MatchesArray<int>({{{{1, 2}}}}));
EXPECT_THAT(MakeArray<int>({{{{{1, 2}}}}}),
MatchesArray<int>({{{{{1, 2}}}}}));
EXPECT_THAT(MakeArray<int>({{{{{{1, 2}}}}}}),
MatchesArray<int>({{{{{{1, 2}}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3}, {1, 2}),
MatchesArray<int>({3}, {1, 2}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4}, {{1, 2}}),
MatchesArray<int>({3, 4}, {{1, 2}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5}, {{{1, 2}}}),
MatchesArray<int>({3, 4, 5}, {{{1, 2}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}),
MatchesArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}),
MatchesArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}),
MatchesArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}));
EXPECT_THAT(
MakeOffsetArray<int>({3}, {1, 2}),
MatchesArray<int>(tensorstore::span<const Index, 1>({3}), {1, 2}));
EXPECT_THAT(
MakeOffsetArray<int>({3, 4}, {{1, 2}}),
MatchesArray<int>(tensorstore::span<const Index, 2>({3, 4}), {{1, 2}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5}, {{{1, 2}}}),
MatchesArray<int>(tensorstore::span<const Index, 3>({3, 4, 5}),
{{{1, 2}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}),
MatchesArray<int>(tensorstore::span<const Index, 4>({3, 4, 5, 6}),
{{{{1, 2}}}}));
EXPECT_THAT(
MakeOffsetArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}),
MatchesArray<int>(tensorstore::span<const Index, 5>({3, 4, 5, 6, 7}),
{{{{{1, 2}}}}}));
EXPECT_THAT(
MakeOffsetArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}),
MatchesArray<int>(tensorstore::span<const Index, 6>({3, 4, 5, 6, 7, 8}),
{{{{{{1, 2}}}}}}));
EXPECT_THAT(MakeArray<int>({1, 3}),
::testing::Not(MatchesArray<int>({1, 2})));
EXPECT_THAT(MakeArray<int>({1}), ::testing::Not(MatchesArray<int>({1, 2})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/array_testutil.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/array_testutil_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a69130c5-914d-471f-85d3-3c9c8caa6645 | cpp | google/tensorstore | open_mode | tensorstore/open_mode.cc | tensorstore/open_mode_test.cc | #include "tensorstore/open_mode.h"
#include <ostream>
#include <string_view>
#include "absl/status/status.h"
namespace tensorstore {
std::string_view to_string(ReadWriteMode mode) {
switch (mode) {
case ReadWriteMode::dynamic:
return "dynamic";
case ReadWriteMode::read:
return "read";
case ReadWriteMode::write:
return "write";
case ReadWriteMode::read_write:
return "read_write";
default:
return "<unknown>";
}
}
std::ostream& operator<<(std::ostream& os, ReadWriteMode mode) {
return os << to_string(mode);
}
std::ostream& operator<<(std::ostream& os, OpenMode mode) {
const char* sep = "";
constexpr const char* kSep = "|";
if (!!(mode & OpenMode::open)) {
os << "open";
sep = kSep;
}
if (!!(mode & OpenMode::create)) {
os << sep << "create";
sep = kSep;
}
if (!!(mode & OpenMode::delete_existing)) {
os << sep << "delete_existing";
sep = kSep;
}
if (!!(mode & OpenMode::assume_metadata)) {
os << sep << "assume_metadata";
sep = kSep;
}
return os;
}
namespace internal {
absl::Status ValidateSupportsRead(ReadWriteMode mode) {
return !(mode & ReadWriteMode::read)
? absl::InvalidArgumentError("Source does not support reading.")
: absl::Status();
}
absl::Status ValidateSupportsWrite(ReadWriteMode mode) {
return !(mode & ReadWriteMode::write)
? absl::InvalidArgumentError(
"Destination does not support writing.")
: absl::Status();
}
absl::Status ValidateSupportsModes(ReadWriteMode mode,
ReadWriteMode required_modes) {
if ((mode & required_modes) != required_modes) {
if (!!(required_modes & ReadWriteMode::read) &&
!(mode & ReadWriteMode::read)) {
return absl::InvalidArgumentError("Read mode not supported");
}
if (!!(required_modes & ReadWriteMode::write) &&
!(mode & ReadWriteMode::write)) {
return absl::InvalidArgumentError("Write mode not supported");
}
}
return absl::OkStatus();
}
}
} | #include "tensorstore/open_mode.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::OpenMode;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::StrCat;
static_assert(ReadWriteMode::read_write ==
(ReadWriteMode::read | ReadWriteMode::write));
static_assert((ReadWriteMode::read_write & ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(!ReadWriteMode::dynamic);
static_assert(tensorstore::internal::StaticReadWriteMask(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::write) == ReadWriteMode::write);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::dynamic) == ReadWriteMode::read_write);
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::read));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::write));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::dynamic,
ReadWriteMode::dynamic));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::read, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::write, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::write));
TEST(ReadWriteModeTest, PrintToOstream) {
EXPECT_EQ("dynamic", StrCat(ReadWriteMode::dynamic));
EXPECT_EQ("read", StrCat(ReadWriteMode::read));
EXPECT_EQ("write", StrCat(ReadWriteMode::write));
EXPECT_EQ("read_write", StrCat(ReadWriteMode::read_write));
EXPECT_EQ("<unknown>", StrCat(static_cast<ReadWriteMode>(10)));
}
TEST(OpenTest, PrintToOstream) {
EXPECT_EQ("", StrCat(OpenMode{}));
EXPECT_EQ("open", StrCat(OpenMode::open));
EXPECT_EQ("create", StrCat(OpenMode::create));
EXPECT_EQ("open|create", StrCat(OpenMode::open | OpenMode::create));
EXPECT_EQ("open|assume_metadata",
StrCat(OpenMode::open | OpenMode::assume_metadata));
EXPECT_EQ("create|delete_existing",
StrCat(OpenMode::create | OpenMode::delete_existing));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/open_mode.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/open_mode_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
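A short usage sketch of the flag semantics above (illustrative only; the main() wrapper is an assumption, not part of the record):

#include <iostream>
#include "absl/status/status.h"
#include "tensorstore/open_mode.h"

int main() {
  // Modes are bit flags, so they combine with `|` and print with the
  // "|" separator implemented by operator<< above.
  auto mode = tensorstore::OpenMode::open | tensorstore::OpenMode::create;
  std::cout << mode << "\n";  // prints: open|create
  // The validation helpers turn a missing mode bit into an error status.
  absl::Status status = tensorstore::internal::ValidateSupportsRead(
      tensorstore::ReadWriteMode::write);
  std::cout << status.message() << "\n";  // Source does not support reading.
  return 0;
}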
4854e98f-6b0a-450f-b9b6-840922cf00e1 | cpp | google/tensorstore | codec_spec | tensorstore/driver/zarr3/codec/codec_spec.cc | tensorstore/codec_spec_test.cc | #include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include <stddef.h>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
namespace tensorstore {
namespace internal_zarr3 {
ZarrCodecSpec::~ZarrCodecSpec() = default;
ZarrCodecKind ZarrArrayToArrayCodecSpec::kind() const {
return ZarrCodecKind::kArrayToArray;
}
ZarrCodecKind ZarrArrayToBytesCodecSpec::kind() const {
return ZarrCodecKind::kArrayToBytes;
}
size_t ZarrArrayToBytesCodecSpec::sharding_height() const { return 0; }
ZarrCodecKind ZarrBytesToBytesCodecSpec::kind() const {
return ZarrCodecKind::kBytesToBytes;
}
CodecRegistry& GetCodecRegistry() {
static absl::NoDestructor<CodecRegistry> registry;
return *registry;
}
}
} | #include "tensorstore/codec_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(CodecSpecSerializationTest, SerializationRoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
tensorstore::CodecSpec::FromJson({
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
}));
TestSerializationRoundTrip(codec);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_spec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/codec_spec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
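A sketch of constructing a CodecSpec from JSON, mirroring the round-trip test above (illustrative only; Example() is a hypothetical wrapper):

#include "tensorstore/codec_spec.h"

void Example() {
  // value() yields the CodecSpec or fails on an error status; the test
  // above uses TENSORSTORE_ASSERT_OK_AND_ASSIGN instead.
  auto codec = tensorstore::CodecSpec::FromJson({
                   {"driver", "zarr"},
                   {"compressor", nullptr},
                   {"filters", nullptr},
               })
                   .value();
  (void)codec;
}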
45888bc4-a3d3-4618-b739-cd96e097b907 | cpp | google/tensorstore | strided_layout | tensorstore/strided_layout.cc | tensorstore/strided_layout_test.cc | #include "tensorstore/strided_layout.h"
#include <stddef.h>
#include <algorithm>
#include <cstdlib>
#include <ostream>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_strided_layout {
void PrintToOstream(
std::ostream& os,
const StridedLayoutView<dynamic_rank, offset_origin>& layout) {
os << "{domain=" << layout.domain()
<< ", byte_strides=" << layout.byte_strides() << "}";
}
std::string DescribeForCast(DimensionIndex rank) {
return tensorstore::StrCat("strided layout with ",
StaticCastTraits<DimensionIndex>::Describe(rank));
}
bool StridedLayoutsEqual(StridedLayoutView<dynamic_rank, offset_origin> a,
StridedLayoutView<dynamic_rank, offset_origin> b) {
return a.domain() == b.domain() &&
internal::RangesEqual(a.byte_strides(), b.byte_strides());
}
}
std::ostream& operator<<(std::ostream& os, ArrayOriginKind origin_kind) {
return os << (origin_kind == zero_origin ? "zero" : "offset");
}
namespace internal_strided_layout {
bool IsContiguousLayout(DimensionIndex rank, const Index* shape,
const Index* byte_strides, ContiguousLayoutOrder order,
Index element_size) {
if (rank == 0) return true;
Index stride = element_size;
if (order == c_order) {
for (DimensionIndex i = rank - 1; i != 0; --i) {
if (byte_strides[i] != stride) return false;
if (internal::MulOverflow(stride, shape[i], &stride)) {
return false;
}
}
if (byte_strides[0] != stride) return false;
} else {
    for (DimensionIndex i = 0; i != rank - 1; ++i) {
      if (byte_strides[i] != stride) return false;
      if (internal::MulOverflow(stride, shape[i], &stride)) {
        return false;
      }
    }
if (byte_strides[rank - 1] != stride) return false;
}
return true;
}
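// Worked example (illustrative): for shape {3, 4, 5} with element_size 2, a
// c_order layout is contiguous iff byte_strides == {40, 10, 2}, since the
// stride accumulates right-to-left as 2, 2*5 = 10, 10*4 = 40.  Under
// fortran order it accumulates left-to-right, giving {2, 6, 24}.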
bool IsBroadcastScalar(DimensionIndex rank, const Index* shape,
const Index* byte_strides) {
for (DimensionIndex i = 0; i < rank; ++i) {
if (shape[i] > 1 && byte_strides[i] != 0) return false;
}
return true;
}
Index GetByteExtent(StridedLayoutView<> layout, Index element_size) {
Index byte_extent = element_size;
for (DimensionIndex i = 0, rank = layout.rank(); i < rank; ++i) {
const Index size = layout.shape()[i];
if (size == 0) return 0;
if (size == 1) continue;
byte_extent =
std::max(byte_extent, internal::wrap_on_overflow::Multiply(
std::abs(layout.byte_strides()[i]), size));
}
return byte_extent;
}
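// Worked example (illustrative): shape {1, 5, 3} with byte_strides
// {1000, -6, 4} and element_size 2 yields max(2, |-6|*5, |4|*3) = 30; the
// size-1 dimension is skipped, so its large stride does not inflate the
// extent.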
}
absl::Status ValidateShapeBroadcast(
tensorstore::span<const Index> source_shape,
tensorstore::span<const Index> target_shape) {
for (DimensionIndex source_dim = 0; source_dim < source_shape.size();
++source_dim) {
const Index source_size = source_shape[source_dim];
if (source_size == 1) continue;
const DimensionIndex target_dim =
target_shape.size() - source_shape.size() + source_dim;
if (target_dim < 0 || target_shape[target_dim] != source_size) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Cannot broadcast array of shape ", source_shape,
" to target shape ", target_shape));
}
}
return absl::OkStatus();
}
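// Worked example (illustrative): source shape {1, 3} broadcasts to target
// shape {4, 2, 3}, since dimensions align from the right and a source
// extent of 1 is compatible with any target extent; source {2, 3} does not
// broadcast to {4, 2, 4} because the trailing extents 3 and 4 differ.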
absl::Status BroadcastStridedLayout(StridedLayoutView<> source,
tensorstore::span<const Index> target_shape,
Index* target_byte_strides) {
TENSORSTORE_RETURN_IF_ERROR(
ValidateShapeBroadcast(source.shape(), target_shape));
for (DimensionIndex target_dim = 0; target_dim < target_shape.size();
++target_dim) {
const DimensionIndex source_dim =
target_dim + source.rank() - target_shape.size();
target_byte_strides[target_dim] =
(source_dim < 0 || source.shape()[source_dim] == 1)
? 0
: source.byte_strides()[source_dim];
}
return absl::OkStatus();
}
absl::Status BroadcastStridedLayout(StridedLayoutView<> source,
tensorstore::span<const Index> target_shape,
StridedLayout<>& target) {
target.set_rank(target_shape.size());
std::copy(target_shape.begin(), target_shape.end(), target.shape().begin());
return BroadcastStridedLayout(source, target_shape,
target.byte_strides().data());
}
Result<Index> BroadcastStridedLayout(
StridedLayoutView<dynamic_rank, offset_origin> source,
BoxView<> target_domain,
StridedLayout<dynamic_rank, offset_origin>& target) {
target.set_rank(target_domain.rank());
TENSORSTORE_RETURN_IF_ERROR(BroadcastStridedLayout(
StridedLayoutView<>(source.shape(), source.byte_strides()),
target_domain.shape(), target.byte_strides().data()));
std::copy_n(target_domain.origin().begin(), target_domain.rank(),
target.origin().begin());
std::copy_n(target_domain.shape().begin(), target_domain.rank(),
target.shape().begin());
return internal::wrap_on_overflow::Subtract(source.origin_byte_offset(),
target.origin_byte_offset());
}
} | #include "tensorstore/strided_layout.h"
#include <array>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
#ifdef NDEBUG
#define TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(stmt, pattern)
#else
#define TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(stmt, pattern) \
EXPECT_DEATH(stmt, pattern)
#endif
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::GetSubLayoutView;
using ::tensorstore::Index;
using ::tensorstore::IndexInnerProduct;
using ::tensorstore::IsStridedLayout;
using ::tensorstore::MatchesStatus;
using ::tensorstore::offset_origin;
using ::tensorstore::StaticCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::StrCat;
using ::tensorstore::StridedLayout;
using ::tensorstore::StridedLayoutView;
using ::tensorstore::unchecked;
using ::tensorstore::zero_origin;
static_assert(!IsStridedLayout<int>);
static_assert(IsStridedLayout<StridedLayout<>>);
static_assert(IsStridedLayout<StridedLayout<2, offset_origin>>);
static_assert(IsStridedLayout<StridedLayoutView<>>);
static_assert(IsStridedLayout<StridedLayoutView<2, offset_origin>>);
namespace dynamic_layout_cast_tests {
template <typename T>
constexpr inline bool NoOpCheck =
std::is_same_v<T, decltype(StaticCast<absl::remove_cvref_t<T>, unchecked>(
std::declval<T>()))>;
static_assert(NoOpCheck<const StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&&>);
static_assert(NoOpCheck<const StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&&>);
}
namespace dynamic_rank_cast_tests {
template <typename T>
constexpr inline bool NoOpCheck = std::is_same_v<
T, decltype(StaticRankCast<absl::remove_cvref_t<T>::static_rank, unchecked>(
std::declval<T>()))>;
static_assert(NoOpCheck<const StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&&>);
static_assert(NoOpCheck<const StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&&>);
}
static_assert(std::is_empty_v<StridedLayout<0>>);
static_assert(std::is_empty_v<StridedLayoutView<0>>);
static_assert(sizeof(Index) * 2 == sizeof(StridedLayout<1>));
static_assert(sizeof(Index) * 4 == sizeof(StridedLayout<2>));
static_assert(sizeof(Index*) * 2 == sizeof(StridedLayout<>));
static_assert(sizeof(Index*) * 3 == sizeof(StridedLayoutView<>));
static_assert(sizeof(Index*) * 2 == sizeof(StridedLayoutView<2>));
static_assert(sizeof(Index*) * 3 ==
sizeof(StridedLayoutView<2, offset_origin>));
TEST(IndexInnerProductTest, Basic) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6, IndexInnerProduct(3, a, b));
}
TEST(IndexInnerProductTest, WrapOnOverflowMultiply) {
const Index a[] = {Index(1) << 62, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(2 * 5 + 3 * 6, IndexInnerProduct(3, a, b));
}
TEST(IndexInnerProductTest, WrapOnOverflowAdd) {
const Index a[] = {Index(1) << 62, Index(1) << 62};
const Index b[] = {2, 2};
EXPECT_EQ(0, IndexInnerProduct(2, a, b));
}
TEST(IndexInnerProductTest, Span) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6,
IndexInnerProduct(tensorstore::span(a), tensorstore::span(b)));
}
namespace conversion_tests {
using ::tensorstore::internal::IsOnlyExplicitlyConvertible;
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<2, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(
IsOnlyExplicitlyConvertible<
StridedLayoutView<2, offset_origin>, StridedLayout<2, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, zero_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(
IsOnlyExplicitlyConvertible<
StridedLayoutView<2, zero_origin>, StridedLayout<2, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<2, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<dynamic_rank, zero_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, zero_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<3, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayoutView<0, offset_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(
std::is_convertible_v<
StridedLayoutView<0, offset_origin>, StridedLayout<0, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<2, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, offset_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<dynamic_rank, offset_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<0, offset_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<0, offset_origin>,
StridedLayout<0, zero_origin>>);
}
TEST(StridedLayoutTest, DynamicRank0) {
StridedLayout<> layout;
EXPECT_EQ(0, layout.rank());
EXPECT_EQ(1, layout.num_elements());
EXPECT_TRUE(layout.shape().empty());
EXPECT_TRUE(layout.byte_strides().empty());
EXPECT_EQ(0, layout());
}
TEST(StridedLayoutDeathTest, DynamicRank0) {
StridedLayout<> layout;
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout[{1}], "Length of index vector is greater than rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout({1}), "Length of index vector must match rank of array.");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout(1), "Length of index vector must match rank of array.");
}
TEST(StridedLayoutTest, DynamicRankCopyAndMove) {
StridedLayout<> layout;
layout.set_rank(3);
EXPECT_EQ(3, layout.rank());
layout.shape()[0] = 7;
layout.shape()[1] = 8;
layout.shape()[2] = 9;
layout.byte_strides()[0] = 4;
layout.byte_strides()[1] = 5;
layout.byte_strides()[2] = 6;
EXPECT_EQ(7 * 8 * 9, layout.num_elements());
EXPECT_EQ(8 + 5, (layout[{2, 1}]));
EXPECT_EQ(8 + 5 + 6, (layout[{2, 1, 1}]));
EXPECT_EQ(8 + 5 + 6, (layout({2, 1, 1})));
EXPECT_EQ(8 + 5 + 6, layout(tensorstore::span({2, 1, 1})));
EXPECT_EQ(8 + 5 + 6, layout(2, 1, 1));
auto layout2 = layout;
EXPECT_EQ(3, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({7, 8, 9}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({4, 5, 6}));
EXPECT_TRUE(layout == layout2);
EXPECT_FALSE(layout != layout2);
layout.shape()[0] = 1;
EXPECT_FALSE(layout == layout2);
EXPECT_TRUE(layout != layout2);
const auto* shape = layout2.shape().data();
const auto* byte_strides = layout2.byte_strides().data();
auto layout3 = std::move(layout2);
EXPECT_EQ(0, layout2.rank());
EXPECT_EQ(3, layout3.rank());
EXPECT_EQ(shape, layout3.shape().data());
EXPECT_EQ(byte_strides, layout3.byte_strides().data());
StridedLayout<> layout4 = layout;
layout4 = std::move(layout3);
EXPECT_EQ(3, layout4.rank());
EXPECT_EQ(shape, layout4.shape().data());
EXPECT_EQ(byte_strides, layout4.byte_strides().data());
}
TEST(StridedLayoutTest, ConstructDynamicFromShapeAndByteStrides) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3, 4};
tensorstore::span<const Index> shape(shape_arr);
tensorstore::span<const Index> byte_strides(byte_strides_arr);
StridedLayout<> layout5(shape, byte_strides);
EXPECT_EQ(2, layout5.rank());
EXPECT_THAT(layout5.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout5.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutDeathTest, ConstructDynamicFromShapeAndByteStrides) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3};
tensorstore::span<const Index> shape(shape_arr);
tensorstore::span<const Index> byte_strides(byte_strides_arr);
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((StridedLayout<>(shape, byte_strides)),
"shape");
}
TEST(StridedLayoutTest, ConstructDynamicFromStridedLayoutView) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3, 4};
StridedLayoutView<> layout_ref(shape_arr, byte_strides_arr);
StridedLayout<> layout(layout_ref);
EXPECT_EQ(2, layout.rank());
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
EXPECT_NE(layout_ref.shape().data(), layout.shape().data());
EXPECT_NE(layout_ref.byte_strides().data(), layout.byte_strides().data());
}
TEST(StridedLayoutTest, ConstructDynamicFromStatic) {
StridedLayout<2> layout_s({1, 2}, {3, 4});
StridedLayout<> layout_d(layout_s);
EXPECT_EQ(2, layout_d.rank());
EXPECT_THAT(layout_d.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_d.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromDynamic) {
StridedLayout<> layout1({1, 2}, {3, 4});
StridedLayout<> layout2;
layout2 = layout1;
EXPECT_EQ(2, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromDynamicRef) {
StridedLayout<> layout1({1, 2}, {3, 4});
StridedLayoutView<> layout_ref = layout1;
StridedLayout<> layout2;
layout2 = layout_ref;
EXPECT_EQ(2, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromStatic) {
StridedLayout<2> layout_s({1, 2}, {3, 4});
StridedLayout<> layout_d;
layout_d = layout_s;
EXPECT_EQ(2, layout_d.rank());
EXPECT_THAT(layout_d.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_d.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutDeathTest, DynamicRankIndexing) {
StridedLayout<> layout(3);
layout.shape()[0] = 7;
layout.shape()[1] = 8;
layout.shape()[2] = 9;
layout.byte_strides()[0] = 4;
layout.byte_strides()[1] = 5;
layout.byte_strides()[2] = 6;
EXPECT_EQ(4 * 6, (layout[{6}]));
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{7}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{-1}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{1, 2, 10}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(layout[{1, 2, 3, 4}]),
"Length of index vector is greater than rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout({1, 2}), "Length of index vector must match rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(StridedLayout<>(tensorstore::span<const Index>({1}),
tensorstore::span<const Index>({1, 2}))),
"shape");
}
TEST(StridedLayoutTest, StaticRank0) {
StridedLayout<0> layout;
EXPECT_EQ(1, layout.num_elements());
EXPECT_EQ(0, layout.rank());
EXPECT_TRUE(layout.shape().empty());
EXPECT_TRUE(layout.byte_strides().empty());
static_assert(!std::is_assignable_v<StridedLayout<0>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayout<0>, StridedLayoutView<>>);
static_assert(!std::is_constructible_v<StridedLayout<0>, StridedLayout<1>>);
static_assert(
!std::is_constructible_v<StridedLayout<0>, StridedLayoutView<1>>);
StridedLayout<0> layout3(tensorstore::span<const Index, 0>{},
tensorstore::span<const Index, 0>{});
[[maybe_unused]] StridedLayout<0> layout2 = layout;
layout3 = layout;
StridedLayout<0> layout5{StridedLayoutView<0>{}};
EXPECT_EQ(0, layout());
EXPECT_EQ(0, (layout[std::array<int, 0>{}]));
EXPECT_EQ(0, (layout(std::array<int, 0>{})));
}
TEST(StridedLayoutTest, DefaultConstructStatic) {
StridedLayout<2> layout;
EXPECT_EQ(2, layout.rank());
}
TEST(StridedLayoutTest, ConstructStaticFromArrays) {
StridedLayout<2> layout({1, 2}, {3, 4});
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, ConstructDynamicFromArrays) {
StridedLayout<> layout({1, 2}, {3, 4});
EXPECT_EQ(2, layout.rank());
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, ConstructStaticFromDynamic) {
StridedLayout<> layout_d({1, 2}, {3, 4});
auto layout_s = StaticRankCast<2>(layout_d).value();
static_assert(std::is_same_v<decltype(layout_s), StridedLayout<2>>);
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(!std::is_constructible_v<StridedLayout<2>, StridedLayout<3>>);
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayout<3>>);
StridedLayout<2> layout_s2(layout_s);
EXPECT_THAT(layout_s2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(!std::is_constructible_v<StridedLayout<2>, StridedLayout<>>);
}
TEST(StridedLayoutTest, ConstructStaticFromDynamicStridedLayoutView) {
StridedLayout<> layout_d({1, 2}, {3, 4});
StridedLayoutView<> layout_ref = layout_d;
auto layout_s = StaticCast<StridedLayout<2>>(layout_ref).value();
static_assert(std::is_same_v<decltype(layout_s), StridedLayout<2>>);
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
auto layout_ref2 = StaticCast<StridedLayoutView<2>>(layout_d).value();
StridedLayout<2> layout_s2(layout_ref2);
EXPECT_THAT(layout_s2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(
!std::is_constructible_v<StridedLayout<2>, StridedLayoutView<3>>);
}
TEST(StridedLayoutTest, AssignStatic) {
StridedLayout<> layout_d({1, 2}, {3, 4});
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayoutView<>>);
{
StridedLayout<2> layout_s;
layout_s = StaticRankCast<2>(layout_d).value();
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
{
StridedLayout<2> layout_s;
layout_s = StaticCast<StridedLayoutView<2>>(layout_d).value();
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
}
TEST(StridedLayoutTest, StaticIndexing) {
StridedLayout<2> layout({3, 5}, {3, 4});
EXPECT_EQ(6 + 4, layout(2, 1));
}
TEST(StridedLayoutViewTest, StaticConstructDefault) {
StridedLayoutView<2> ref;
EXPECT_EQ(2, ref.rank());
EXPECT_EQ(0, ref.shape()[0]);
EXPECT_EQ(0, ref.shape()[1]);
EXPECT_EQ(0, ref.byte_strides()[0]);
EXPECT_EQ(0, ref.byte_strides()[1]);
}
TEST(StridedLayoutViewTest, StaticConstructFromSpans) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
EXPECT_EQ(&shape[0], ref.shape().data());
EXPECT_EQ(&byte_strides[0], ref.byte_strides().data());
}
TEST(StridedLayoutViewTest, StaticConstructAndAssign) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
{
StridedLayoutView<2> ref2 = ref;
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
{
StridedLayoutView<2> ref2 =
StaticRankCast<2>(StridedLayoutView<>{ref}).value();
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
static_assert(
!std::is_convertible_v<StridedLayoutView<>, StridedLayoutView<2>>);
static_assert(!std::is_convertible_v<StridedLayout<>, StridedLayoutView<2>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayoutView<3>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayoutView<>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayout<>>);
static_assert(
!std::is_assignable_v<StridedLayoutView<2>, StridedLayoutView<>>);
static_assert(!std::is_assignable_v<StridedLayoutView<2>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayoutView<2>, StridedLayout<3>>);
static_assert(
!std::is_assignable_v<StridedLayoutView<2>, StridedLayoutView<3>>);
{
StridedLayoutView<2> ref2;
ref2 = ref;
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
{
StridedLayout<2> layout(ref);
StridedLayoutView<2> ref2;
ref2 = layout;
EXPECT_EQ(layout.shape().data(), ref2.shape().data());
EXPECT_EQ(layout.byte_strides().data(), ref2.byte_strides().data());
}
StridedLayout<2> layout(std::integral_constant<DimensionIndex, 2>{});
}
TEST(StridedLayoutViewTest, CastError) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<> ref(shape, byte_strides);
EXPECT_THAT(StaticCast<StridedLayout<1>>(ref),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast strided layout with rank of 2 to "
"strided layout with rank of 1"));
}
TEST(StridedLayoutViewTest, DynamicConstructAndAssign) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
{
StridedLayoutView<> r;
EXPECT_EQ(0, r.rank());
EXPECT_TRUE(r.shape().empty());
EXPECT_TRUE(r.byte_strides().empty());
}
{
StridedLayoutView<> r(shape, byte_strides);
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
EXPECT_EQ(2, r.shape().size());
EXPECT_EQ(2, r.byte_strides().size());
{
StridedLayoutView<> r2 = r;
EXPECT_EQ(2, r2.rank());
EXPECT_EQ(&shape[0], r2.shape().data());
EXPECT_EQ(&byte_strides[0], r2.byte_strides().data());
}
{
StridedLayoutView<> r2;
r2 = r;
EXPECT_EQ(2, r2.rank());
EXPECT_EQ(&shape[0], r2.shape().data());
EXPECT_EQ(&byte_strides[0], r2.byte_strides().data());
}
}
{
StridedLayoutView<> r = ref;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = ref;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
}
{
StridedLayout<> layout(ref);
{
StridedLayoutView<> r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
}
{
StridedLayout<2> layout(ref);
{
StridedLayoutView<> r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
}
}
TEST(StridedLayoutViewTest, Static0) {
{
StridedLayoutView<0> r;
EXPECT_EQ(0, r.rank());
EXPECT_EQ(nullptr, r.shape().data());
EXPECT_EQ(nullptr, r.byte_strides().data());
}
{
StridedLayoutView<0> r;
[[maybe_unused]] StridedLayoutView<0> r2 = r;
}
{
StridedLayoutView<0> r(tensorstore::span<const Index, 0>{},
tensorstore::span<const Index, 0>{});
}
{
StridedLayout<0> layout;
StridedLayoutView<0> r = layout;
r = layout;
}
}
TEST(StridedLayoutViewDeathTest, DynamicConstruct) {
[[maybe_unused]] const Index shape[] = {5, 3};
[[maybe_unused]] const Index byte_strides[] = {3};
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(StridedLayoutView<>(shape, byte_strides)), "shape");
StridedLayout<> x;
x.set_rank(2);
EXPECT_THAT(StaticCast<StridedLayoutView<0>>(StridedLayoutView<>(x)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(StaticCast<StridedLayoutView<0>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(StridedLayoutViewTest, Compare) {
StridedLayout<> r1(tensorstore::span<const Index>({1, 2}),
tensorstore::span<const Index>({3, 4}));
StridedLayout<> r2(tensorstore::span<const Index>({1, 2}),
tensorstore::span<const Index>({3, 4}));
StridedLayout<> r3(tensorstore::span<const Index>({1, 2, 3}),
tensorstore::span<const Index>({3, 4, 5}));
EXPECT_TRUE(r1 == r2);
EXPECT_FALSE(r1 != r2);
r1.shape()[0] = 2;
EXPECT_FALSE(r1 == r2);
EXPECT_TRUE(r1 != r2);
EXPECT_FALSE(r1 == StridedLayoutView<>{});
EXPECT_TRUE(r1 != StridedLayoutView<>{});
EXPECT_TRUE(StridedLayout<0>() == StridedLayoutView<0>());
EXPECT_FALSE(r3 == r2);
EXPECT_FALSE(r2 == r3);
EXPECT_TRUE(r2 != r3);
}
TEST(StridedLayoutViewTest, SubLayout) {
{
StridedLayout<3> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView<0>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<3>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView<1>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<2>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, tensorstore::StaticRank<1>{});
static_assert(std::is_same_v<decltype(s), StridedLayoutView<2>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<2>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<1>>);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<3>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<0>>);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<3> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView(r, 0);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 1);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 2);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 3);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView<0>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView<1>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<2>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<3>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView(r, 0);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 1);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 2);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 3);
EXPECT_EQ(0, s.rank());
}
}
}
TEST(StridedLayoutViewDeathTest, SubLayout) {
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView(r, -1), "sub_rank");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView(r, 4), "sub_rank");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView<4>(r), "sub_rank");
}
TEST(StridedLayoutTest, COrderStatic) {
auto layout = StridedLayout(ContiguousLayoutOrder::c, 2,
tensorstore::span<const Index, 3>({3, 4, 5}));
static_assert(std::is_same_v<decltype(layout), StridedLayout<3>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {4 * 5 * 2, 5 * 2, 2}), layout);
StridedLayout<3, offset_origin> layout_offset_origin(ContiguousLayoutOrder::c,
2, {3, 4, 5});
EXPECT_EQ((StridedLayout<3, offset_origin>({0, 0, 0}, {3, 4, 5},
{4 * 5 * 2, 5 * 2, 2})),
layout_offset_origin);
}
TEST(StridedLayoutTest, COrderDynamic) {
auto layout = StridedLayout(ContiguousLayoutOrder::c, 2,
tensorstore::span<const Index>({3, 4, 5}));
static_assert(std::is_same_v<decltype(layout), StridedLayout<>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {4 * 5 * 2, 5 * 2, 2}), layout);
}
TEST(StridedLayoutTest, COrderVector) {
auto layout =
StridedLayout(ContiguousLayoutOrder::c, 2, std::vector<Index>{3, 4, 5});
static_assert(std::is_same_v<decltype(layout), StridedLayout<>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {4 * 5 * 2, 5 * 2, 2}), layout);
StridedLayout<3, offset_origin> layout_offset_origin(
ContiguousLayoutOrder::c, 2, std::vector<Index>{3, 4, 5});
EXPECT_EQ((StridedLayout<3, offset_origin>({0, 0, 0}, {3, 4, 5},
{4 * 5 * 2, 5 * 2, 2})),
layout_offset_origin);
}
TEST(StridedLayoutTest, FortranOrderStatic) {
auto layout = StridedLayout(ContiguousLayoutOrder::fortran, 2, {3, 4, 5});
static_assert(std::is_same_v<decltype(layout), StridedLayout<3>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {2, 3 * 2, 3 * 4 * 2}), layout);
}
TEST(StridedLayoutTest, FortranOrderDynamic) {
auto layout = StridedLayout(ContiguousLayoutOrder::fortran, 2,
tensorstore::span<const Index>({3, 4, 5}));
static_assert(std::is_same_v<decltype(layout), StridedLayout<>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {2, 3 * 2, 3 * 4 * 2}), layout);
}
TEST(StridedLayoutTest, PrintToOstream) {
auto layout = StridedLayout(ContiguousLayoutOrder::fortran, 2, {3, 4, 5});
EXPECT_EQ(
"{domain={origin={0, 0, 0}, shape={3, 4, 5}}, byte_strides={2, 6, 24}}",
StrCat(layout));
}
TEST(StridedLayoutViewTest, PrintToOstream) {
auto layout = StridedLayout(ContiguousLayoutOrder::fortran, 2, {3, 4, 5});
EXPECT_EQ(
"{domain={origin={0, 0, 0}, shape={3, 4, 5}}, byte_strides={2, 6, 24}}",
StrCat(StridedLayoutView<>(layout)));
}
TEST(StridedLayoutTest, Domain) {
auto layout = StridedLayout(ContiguousLayoutOrder::fortran, 2, {3, 4, 5});
auto box = layout.domain();
static_assert(std::is_same_v<decltype(box), tensorstore::BoxView<3>>);
EXPECT_THAT(box.shape(), ::testing::ElementsAreArray({3, 4, 5}));
EXPECT_THAT(box.origin(), ::testing::ElementsAreArray({0, 0, 0}));
EXPECT_EQ(box, GetBoxDomainOf(layout));
}
TEST(StridedLayoutTest, OffsetOrigin) {
auto domain = Box({1, 2, 3}, {4, 5, 6});
auto layout = StridedLayout(ContiguousLayoutOrder::c, 2, domain);
EXPECT_EQ(domain, layout.domain());
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({60, 12, 2}));
}
TEST(StridedLayoutTest, ConstructOffsetFromRankAndThreePointers) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(3, origin, shape,
byte_strides);
EXPECT_EQ(layout.domain(), BoxView(origin, shape));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, ConstructOffsetFromThreeSpans) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout{
tensorstore::span(origin), tensorstore::span(shape),
tensorstore::span(byte_strides)};
EXPECT_EQ(layout.domain(), BoxView(origin, shape));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, ConstructOffsetFromTwoSpans) {
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout{
tensorstore::span(shape), tensorstore::span(byte_strides)};
EXPECT_EQ(layout.domain(), BoxView(shape));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, ConstructOffsetFromBoxAndByteStrides) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout{
BoxView(origin, shape), tensorstore::span(byte_strides)};
EXPECT_EQ(layout.domain(), BoxView(origin, shape));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, AssignOffsetOriginFromZeroOrigin) {
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout;
layout = StridedLayout<>(tensorstore::span(shape),
tensorstore::span(byte_strides));
EXPECT_EQ(layout.domain(), BoxView(shape));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, AssignOffsetOriginFromStaticOffsetOrigin) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout;
layout = StridedLayout<3, offset_origin>(origin, shape, byte_strides);
EXPECT_EQ(BoxView(origin, shape), layout.domain());
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray(byte_strides));
}
TEST(StridedLayoutTest, OffsetOriginGetSubLayoutRef) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout;
layout = StridedLayout<3, offset_origin>(origin, shape, byte_strides);
auto layout2 = GetSubLayoutView(layout, 1);
EXPECT_EQ((StridedLayout<dynamic_rank, offset_origin>(
2, origin + 1, shape + 1, byte_strides + 1)),
layout2);
}
TEST(StridedLayoutTest, Contains) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
EXPECT_TRUE(Contains(layout, tensorstore::span({1, 2, 3})));
EXPECT_FALSE(Contains(layout, tensorstore::span({0, 2, 3})));
EXPECT_FALSE(Contains(layout, tensorstore::span({1, 2, 3, 4})));
EXPECT_FALSE(Contains(layout, tensorstore::span({1, 2})));
}
TEST(StridedLayoutTest, ContainsPartial) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
EXPECT_TRUE(ContainsPartial(layout, tensorstore::span({1, 2, 3})));
EXPECT_FALSE(ContainsPartial(layout, tensorstore::span({0, 2, 3})));
EXPECT_FALSE(ContainsPartial(layout, tensorstore::span({1, 2, 3, 4})));
EXPECT_TRUE(ContainsPartial(layout, tensorstore::span({1, 2})));
EXPECT_FALSE(ContainsPartial(layout, tensorstore::span({0, 2})));
}
TEST(StridedLayoutTest, RankCastNoOp) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
auto layout2 = StaticRankCast<dynamic_rank>(layout).value();
EXPECT_EQ(layout, layout2);
}
TEST(StridedLayoutTest, RankCastOffsetOrigin) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
auto layout2 = StaticRankCast<3>(layout).value();
static_assert(
std::is_same_v<decltype(layout2), StridedLayout<3, offset_origin>>);
EXPECT_EQ(layout, layout2);
}
TEST(StridedLayoutTest, ZeroOriginByteOffset) {
StridedLayout<dynamic_rank> layout({1, 2}, {3, 4});
EXPECT_EQ(0, layout.origin_byte_offset());
}
TEST(StridedLayoutTest, OffsetOriginByteOffset) {
StridedLayout<dynamic_rank, offset_origin> layout({1, 2}, {3, 4}, {5, 6});
EXPECT_EQ(1 * 5 + 2 * 6, layout.origin_byte_offset());
}
TEST(StridedLayoutTest, DynamicLayoutCastOffsetOrigin) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
auto layout2 = StaticCast<StridedLayout<3, offset_origin>>(layout).value();
EXPECT_EQ(layout, layout2);
}
TEST(StridedLayoutTest, DynamicLayoutCastNoOp) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
const Index byte_strides[] = {7, 8, 9};
StridedLayout<dynamic_rank, offset_origin> layout(origin, shape,
byte_strides);
auto layout2 =
StaticCast<StridedLayout<dynamic_rank, offset_origin>>(layout).value();
EXPECT_EQ(layout, layout2);
}
TEST(ArrayOriginKindTest, PrintToOstream) {
EXPECT_EQ("zero", StrCat(zero_origin));
EXPECT_EQ("offset", StrCat(offset_origin));
}
TEST(StridedLayoutTest, IsContiguousLayout) {
EXPECT_TRUE(IsContiguousLayout(
StridedLayout<>(ContiguousLayoutOrder::c, 2, {{3, 4, 5}}),
ContiguousLayoutOrder::c, 2));
EXPECT_FALSE(IsContiguousLayout(
StridedLayout<>(ContiguousLayoutOrder::c, 2, {{3, 4, 5}}),
ContiguousLayoutOrder::c, 3));
EXPECT_TRUE(IsContiguousLayout(
StridedLayout<>(ContiguousLayoutOrder::fortran, 2, {{3, 4, 5}}),
ContiguousLayoutOrder::fortran, 2));
EXPECT_FALSE(IsContiguousLayout(
StridedLayout<>(ContiguousLayoutOrder::c, 2, {{3, 4, 5}}),
ContiguousLayoutOrder::fortran, 2));
EXPECT_FALSE(IsContiguousLayout(
StridedLayout<>(ContiguousLayoutOrder::fortran, 2, {{3, 4, 5}}),
ContiguousLayoutOrder::c, 2));
}
TEST(StridedLayoutTest, IsBroadcastScalar) {
EXPECT_TRUE(IsBroadcastScalar(StridedLayout<>({1}, {5})));
EXPECT_FALSE(IsBroadcastScalar(StridedLayout<>({2}, {5})));
EXPECT_TRUE(IsBroadcastScalar(StridedLayout<>({2}, {0})));
EXPECT_TRUE(IsBroadcastScalar(StridedLayout<>({1, 1, 1}, {5, 10, 15})));
EXPECT_FALSE(IsBroadcastScalar(StridedLayout<>({1, 2}, {0, 5})));
EXPECT_TRUE(IsBroadcastScalar(StridedLayout<>({1, 2}, {5, 0})));
}
TEST(StridedLayoutTest, GetByteExtent) {
EXPECT_THAT(GetByteExtent(
StridedLayout<>(ContiguousLayoutOrder::c, 2, {{3, 4, 5}}), 2),
3 * 4 * 5 * 2);
EXPECT_THAT(
GetByteExtent(
StridedLayout<>(ContiguousLayoutOrder::fortran, 2, {{3, 4, 5}}), 2),
3 * 4 * 5 * 2);
EXPECT_THAT(GetByteExtent(StridedLayout<>({{1, 0, 3}}, {{2, 3, 4}}), 2), 0);
EXPECT_THAT(GetByteExtent(StridedLayout<>({{1, 5, 3}}, {{1000, -6, 4}}), 2),
30);
EXPECT_THAT(GetByteExtent(StridedLayout<>({{1, 1, 1}}, {{1000, -6, 4}}), 2),
2);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/strided_layout.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/strided_layout_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
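A usage sketch of the broadcasting helper above (illustrative only; the main() wrapper is an assumption, not part of the record):

#include <iostream>
#include "tensorstore/index.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/span.h"

int main() {
  // Broadcasting shape {3} into {4, 3} zero-fills the stride of the new
  // leading dimension, so every row aliases the same source elements.
  tensorstore::StridedLayout<> source({3}, {4});
  tensorstore::StridedLayout<> target;
  auto status = tensorstore::BroadcastStridedLayout(
      source, tensorstore::span<const tensorstore::Index>({4, 3}), target);
  if (status.ok()) {
    std::cout << target << "\n";  // byte_strides={0, 4}
  }
  return 0;
}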
50617f0f-f1ae-4979-a39a-9b816272d90c | cpp | google/tensorstore | static_cast | tensorstore/static_cast.cc | tensorstore/static_cast_test.cc | #include "tensorstore/static_cast.h"
#include "absl/status/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_cast {
absl::Status CastError(std::string_view source_description,
std::string_view target_description) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot cast ", source_description, " to ", target_description));
}
}
} | #include "tensorstore/static_cast.h"
#include <cstddef>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::IsStaticCastConstructible;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StaticCast;
using ::tensorstore::unchecked;
using ::tensorstore::unchecked_t;
template <ptrdiff_t Extent>
struct X {
X(tensorstore::span<int, Extent> data) : data(data) {}
template <ptrdiff_t OtherExtent,
std::enable_if_t<(OtherExtent == Extent ||
OtherExtent == dynamic_extent ||
Extent == dynamic_extent)>* = nullptr>
explicit X(unchecked_t, X<OtherExtent> other)
: data(other.data.data(), other.data.size()) {}
tensorstore::span<int, Extent> data;
};
template <ptrdiff_t Extent>
struct Y {
Y(tensorstore::span<int, Extent> data) : data(data) {}
tensorstore::span<int, Extent> data;
};
}
namespace tensorstore {
template <ptrdiff_t Extent>
struct StaticCastTraits<X<Extent>> : public DefaultStaticCastTraits<X<Extent>> {
template <typename Other>
static bool IsCompatible(const Other& other) {
return other.data.size() == Extent || Extent == tensorstore::dynamic_extent;
}
static std::string Describe() { return StrCat("X with extent of ", Extent); }
static std::string Describe(const X<Extent>& value) {
return StrCat("X with extent of ", value.data.size());
}
};
template <ptrdiff_t Extent>
struct StaticCastTraits<Y<Extent>> {
template <ptrdiff_t OtherExtent,
std::enable_if_t<(OtherExtent == Extent ||
OtherExtent == dynamic_extent ||
Extent == dynamic_extent)>* = nullptr>
static Y<Extent> Construct(Y<OtherExtent> other) {
return Y<Extent>(
tensorstore::span<int, Extent>(other.data.data(), other.data.size()));
}
template <typename Other>
static bool IsCompatible(const Other& other) {
return other.data.size() == Extent || Extent == tensorstore::dynamic_extent;
}
static std::string Describe() { return StrCat("Y with extent of ", Extent); }
static std::string Describe(const Y<Extent>& value) {
return StrCat("Y with extent of ", value.data.size());
}
};
}
namespace {
static_assert(IsStaticCastConstructible<X<3>, X<dynamic_extent>>);
static_assert(IsStaticCastConstructible<X<dynamic_extent>, X<3>>);
static_assert(IsStaticCastConstructible<X<3>, X<3>>);
static_assert(!IsStaticCastConstructible<X<3>, X<2>>);
static_assert(IsStaticCastConstructible<Y<3>, Y<dynamic_extent>>);
static_assert(IsStaticCastConstructible<Y<dynamic_extent>, Y<3>>);
static_assert(IsStaticCastConstructible<Y<3>, Y<3>>);
static_assert(!IsStaticCastConstructible<Y<3>, Y<2>>);
static_assert(std::is_same_v<const X<3>&, decltype(StaticCast<X<3>, unchecked>(
std::declval<const X<3>&>()))>);
static_assert(std::is_same_v<X<3>&, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<3>&>()))>);
static_assert(std::is_same_v<X<3>&&, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<3>&&>()))>);
static_assert(
std::is_same_v<X<3>, decltype(StaticCast<X<3>, unchecked>(
std::declval<const X<dynamic_extent>&>()))>);
static_assert(std::is_same_v<X<3>, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<dynamic_extent>&>()))>);
static_assert(std::is_same_v<Result<X<3>>, decltype(StaticCast<X<3>>(
std::declval<const X<3>&>()))>);
static_assert(std::is_same_v<
Result<X<3>>, decltype(StaticCast<X<3>>(std::declval<X<3>&>()))>);
static_assert(std::is_same_v<Result<X<3>>,
decltype(StaticCast<X<3>>(
std::declval<const X<dynamic_extent>&>()))>);
static_assert(
std::is_same_v<Result<X<3>>, decltype(StaticCast<X<3>>(
std::declval<X<dynamic_extent>&>()))>);
TEST(DefaultCastTraitsTest, Success) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
auto cast_result = StaticCast<X<3>>(x);
static_assert(std::is_same_v<decltype(cast_result), Result<X<3>>>);
ASSERT_TRUE(cast_result);
EXPECT_EQ(vec.data(), cast_result->data.data());
auto& noop_cast_result = StaticCast<X<dynamic_extent>, unchecked>(x);
EXPECT_EQ(&noop_cast_result, &x);
auto unchecked_cast_result = StaticCast<X<3>, unchecked>(x);
static_assert(std::is_same_v<decltype(unchecked_cast_result), X<3>>);
}
TEST(DefaultCastTraitsTest, CheckedFailure) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
EXPECT_THAT(
StaticCast<X<2>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast X with extent of 3 to X with extent of 2"));
}
TEST(DefaultCastTraitsDeathTest, UncheckedFailure) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
EXPECT_DEBUG_DEATH((StaticCast<X<2>, unchecked>(x)),
"StaticCast is not valid");
}
TEST(CustomTraitsTest, Success) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
auto cast_result = StaticCast<Y<3>>(x);
static_assert(std::is_same_v<decltype(cast_result), Result<Y<3>>>);
ASSERT_TRUE(cast_result);
EXPECT_EQ(vec.data(), cast_result->data.data());
auto& noop_cast_result = StaticCast<Y<dynamic_extent>, unchecked>(x);
EXPECT_EQ(&noop_cast_result, &x);
auto unchecked_cast_result = StaticCast<Y<3>, unchecked>(x);
static_assert(std::is_same_v<decltype(unchecked_cast_result), Y<3>>);
}
TEST(CustomTraitsTest, CheckedFailure) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
EXPECT_THAT(
StaticCast<Y<2>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast Y with extent of 3 to Y with extent of 2"));
}
TEST(CustomTraitsDeathTest, UncheckedFailure) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
EXPECT_DEBUG_DEATH((StaticCast<Y<2>, unchecked>(x)),
"StaticCast is not valid");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/static_cast.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/static_cast_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
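A sketch reusing the X<Extent> example type defined in the test above (illustrative only; Example() is a hypothetical wrapper that assumes the test's context, including its includes):

void Example() {
  std::vector<int> vec{1, 2, 3};
  X<tensorstore::dynamic_extent> x(vec);
  // Checked cast: returns Result<X<3>>, succeeding because the runtime
  // extent matches.
  auto ok = tensorstore::StaticCast<X<3>>(x);
  // Checked cast to the wrong extent yields an InvalidArgument status
  // instead of undefined behavior.
  auto bad = tensorstore::StaticCast<X<2>>(x);
  // Unchecked cast: asserts in debug builds, free in release builds.
  auto fast = tensorstore::StaticCast<X<3>, tensorstore::unchecked>(x);
  (void)ok; (void)bad; (void)fast;
}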
e4f30fc4-6344-43f8-a6de-02d2dcef49d2 | cpp | google/tensorstore | resize_options | tensorstore/resize_options.cc | tensorstore/resize_options_test.cc | #include "tensorstore/resize_options.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, ResolveBoundsMode mode) {
constexpr const char* kModeNames[] = {
"fix_resizable_bounds",
};
const char* sep = "";
constexpr const char* kSep = "|";
for (size_t i = 0; i < ABSL_ARRAYSIZE(kModeNames); ++i) {
if (static_cast<int>(mode) & (1 << i)) {
os << sep << kModeNames[i];
sep = kSep;
}
}
return os;
}
std::ostream& operator<<(std::ostream& os, ResizeMode mode) {
constexpr const char* kModeNames[] = {
"resize_metadata_only",
"resize_tied_bounds",
"expand_only",
"shrink_only",
};
const char* sep = "";
constexpr const char* kSep = "|";
for (size_t i = 0; i < ABSL_ARRAYSIZE(kModeNames); ++i) {
if (static_cast<int>(mode) & (1 << i)) {
os << sep << kModeNames[i];
sep = kSep;
}
}
return os;
}
} | #include "tensorstore/resize_options.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::ResizeMode;
using ::tensorstore::ResolveBoundsMode;
using ::tensorstore::StrCat;
TEST(ResolveBoundsModeTest, PrintToOstream) {
EXPECT_EQ("fix_resizable_bounds",
StrCat(ResolveBoundsMode::fix_resizable_bounds));
EXPECT_EQ("", StrCat(ResolveBoundsMode{}));
}
TEST(ResolveBoundsModeTest, BitwiseOr) {
EXPECT_EQ(ResolveBoundsMode::fix_resizable_bounds,
ResolveBoundsMode::fix_resizable_bounds | ResolveBoundsMode{});
EXPECT_EQ(ResolveBoundsMode::fix_resizable_bounds,
ResolveBoundsMode{} | ResolveBoundsMode::fix_resizable_bounds);
}
TEST(ResizeModeTest, PrintToOstream) {
EXPECT_EQ(
"resize_metadata_only|resize_tied_bounds|expand_only|shrink_only",
StrCat(ResizeMode::resize_metadata_only | ResizeMode::resize_tied_bounds |
ResizeMode::expand_only | ResizeMode::shrink_only));
EXPECT_EQ("", StrCat(ResizeMode{}));
}
TEST(ResizeModeTest, BitwiseOr) {
EXPECT_EQ(ResizeMode::resize_metadata_only,
ResizeMode::resize_metadata_only | ResizeMode{});
EXPECT_EQ(ResizeMode::resize_metadata_only,
ResizeMode{} | ResizeMode::resize_metadata_only);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/resize_options.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/resize_options_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
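A short print sketch for the flags above (illustrative only; the main() wrapper is an assumption):

#include <iostream>
#include "tensorstore/resize_options.h"

int main() {
  // Flags print in the fixed kModeNames order, not the order in which they
  // are combined, so this prints: resize_metadata_only|expand_only
  auto mode = tensorstore::ResizeMode::expand_only |
              tensorstore::ResizeMode::resize_metadata_only;
  std::cout << mode << "\n";
  return 0;
}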
752315be-50a6-4e68-908c-f2f56b92567d | cpp | google/tensorstore | box | tensorstore/box.cc | tensorstore/box_test.cc | #include "tensorstore/box.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <ostream>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_box {
std::string DescribeForCast(DimensionIndex rank) {
return tensorstore::StrCat("box with ",
StaticCastTraits<DimensionIndex>::Describe(rank));
}
std::ostream& PrintToOstream(std::ostream& os, const BoxView<>& view) {
return os << "{origin=" << view.origin() << ", shape=" << view.shape() << "}";
}
bool AreEqual(const BoxView<>& box_a, const BoxView<>& box_b) {
return box_a.rank() == box_b.rank() &&
std::equal(box_a.shape().begin(), box_a.shape().end(),
box_b.shape().begin()) &&
std::equal(box_a.origin().begin(), box_a.origin().end(),
box_b.origin().begin());
}
bool IsFinite(BoxView<> box) {
for (DimensionIndex i = 0; i < box.rank(); ++i) {
if (!IsFinite(box[i])) return false;
}
return true;
}
}
namespace serialization {
namespace internal_serialization {
bool EncodeBoxView(EncodeSink& sink, BoxView<> box) {
return serialization::EncodeTuple(sink, box.origin(), box.shape());
}
bool DecodeBoxView(DecodeSource& source, MutableBoxView<> box) {
return serialization::DecodeTuple(source, box.origin(), box.shape());
}
}
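// Rank fits in a single byte: kMaxRank is far below 256, so the encoded
// value is the rank itself, and any byte above kMaxRank is a decode error.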
bool RankSerializer::Encode(EncodeSink& sink, DimensionIndex rank) {
assert(IsValidRank(rank));
return sink.writer().WriteByte(static_cast<uint8_t>(rank));
}
bool RankSerializer::Decode(DecodeSource& source, DimensionIndex& rank) {
uint8_t v;
if (!source.reader().ReadByte(v)) return false;
  if (v > kMaxRank) {
    source.Fail(DecodeError(
        tensorstore::StrCat("Invalid rank value: ", static_cast<size_t>(v))));
    return false;
  }
rank = static_cast<DimensionIndex>(v);
return true;
}
}
} | #include "tensorstore/box.h"
#include <array>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::dynamic_rank;
using ::tensorstore::HasBoxDomain;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IsStaticCastConstructible;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MutableBoxView;
using ::tensorstore::StaticRankCast;
using ::tensorstore::SubBoxView;
using ::tensorstore::unchecked;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
static_assert(std::is_convertible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_constructible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<BoxView<3>, Box<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, MutableBoxView<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, Box<>>);
static_assert(std::is_constructible_v<MutableBoxView<3>, Box<3>&>);
static_assert(IsStaticCastConstructible<BoxView<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, Box<>>);
static_assert(IsStaticCastConstructible<BoxView<>, BoxView<3>>);
static_assert(IsStaticCastConstructible<MutableBoxView<3>, Box<3>&>);
static_assert(!IsStaticCastConstructible<MutableBoxView<>, const Box<3>&>);
static_assert(!IsStaticCastConstructible<BoxView<2>, BoxView<3>>);
static_assert(!IsStaticCastConstructible<BoxView<2>, Box<3>>);
static_assert(!IsStaticCastConstructible<Box<3>, Box<2>>);
TEST(BoxTest, DefaultConstructDynamic) {
Box<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxTest, DefaultConstructStatic) {
Box<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, RankPointersConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> box(3, origin, shape);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, SizeConstruct) {
Box<> box(3);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, ShapeArrayConstruct) {
std::array<Index, 3> shape{{1, 2, 3}};
Box<> box(shape);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(1, 2, 3));
}
TEST(BoxTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
  EXPECT_THAT(box.origin(), ElementsAreArray(origin));
  EXPECT_THAT(box.shape(), ElementsAreArray(shape));
}
TEST(BoxTest, ConstructFromArrays) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, ConstructFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box(view);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = Box(shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(origin, shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span<const Index>(origin),
tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box;
box = view;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignDynamicBoxFromStaticBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<3> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
box.Fill();
box = BoxView<3>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignStaticBoxFromDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<3> box;
box = StaticRankCast<3, unchecked>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, SetRank) {
Box<> box;
box.set_rank(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, Accessors) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, ConstAccessors) {
const Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, SubscriptAssignment) {
Box<> box(2);
box[1] = IndexInterval::UncheckedSized(1, 5);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, 1));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, 5));
}
TEST(BoxTest, Fill) {
Box<> box(2);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5));
}
TEST(BoxTest, IsEmpty) {
Box<> box(3);
EXPECT_FALSE(box.is_empty());
box.Fill(IndexInterval::UncheckedSized(0, 0));
EXPECT_TRUE(box.is_empty());
}
TEST(BoxViewTest, StaticRankDefaultConstruct) {
BoxView<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankDefaultConstruct) {
BoxView<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxViewTest, DynamicRankSizeConstruct) {
BoxView<> box(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(&origin[0], box.origin().data());
EXPECT_EQ(&shape[0], box.shape().data());
}
TEST(BoxViewTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<const Index>(origin),
tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
const Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, Subscript) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(IndexInterval::UncheckedSized(1, 3), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 4), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 5), box[2]);
}
TEST(BoxViewTest, NumElements) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(3 * 4 * 5, box.num_elements());
}
TEST(BoxViewTest, StaticToDynamicConversion) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> dynamic_box = box;
EXPECT_EQ(3, dynamic_box.rank());
EXPECT_THAT(dynamic_box.shape(), ElementsAreArray(shape));
EXPECT_THAT(dynamic_box.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignment) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<3> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignmentStaticToDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, StaticRankCast) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
EXPECT_THAT(
StaticRankCast<2>(box),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast box with rank of 3 to box with rank of 2"));
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, ConstructFromDynamicBox) {
Box<> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(BoxViewTest, ConstructFromStaticBox) {
Box<2> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(MutableBoxViewTest, RankPointersConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(3, origin, shape);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DynamicRankSpanConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeArrays) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeDynamic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<Index>(origin),
tensorstore::span<Index>(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, DeduceFromMutableBoxView) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, AssignFromBoxView) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(BoxView(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, AssignFromBox) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(Box(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, CopyAssign) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
Index origin2[] = {10, 20, 30};
Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(MutableBoxView<>(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, SubscriptAssignment) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box[1] = IndexInterval::UncheckedSized(1, 7);
EXPECT_THAT(origin, ElementsAre(1, 1, 3));
EXPECT_THAT(shape, ElementsAre(4, 7, 6));
}
TEST(MutableBoxViewTest, Fill) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5, 5));
box.Fill();
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(MutableBoxViewTest, StaticRankCast) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, Comparison) {
const Index origin1[] = {1, 2, 3};
const Index shape1[] = {4, 5, 6};
const Index origin2[] = {1, 2, 3};
const Index shape2[] = {4, 5, 6};
const Index origin3[] = {1, 2, 4};
const Index shape3[] = {4, 5, 7};
const Index origin4[] = {1, 2};
const Index shape4[] = {4, 5};
BoxView<> view1(origin1, shape1);
Box<> box1(view1);
BoxView<> view2(origin2, shape2);
Box<> box2(view2);
BoxView<> view3(origin3, shape3);
Box<> box3(view3);
BoxView<> view4(origin4, shape4);
Box<> box4(view4);
EXPECT_EQ(box1, view1);
EXPECT_EQ(box2, view2);
EXPECT_EQ(box3, view3);
EXPECT_EQ(box4, view4);
EXPECT_EQ(view1, view2);
EXPECT_EQ(view1, box2);
EXPECT_EQ(box1, view2);
EXPECT_EQ(box1, box2);
EXPECT_NE(view1, view3);
EXPECT_NE(view1, box3);
EXPECT_NE(box1, view3);
EXPECT_NE(box1, box3);
EXPECT_NE(view1, view4);
EXPECT_NE(view1, box4);
EXPECT_NE(box1, view4);
EXPECT_NE(box1, box4);
}
TEST(BoxTest, Print) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(BoxView<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(Box<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(MutableBoxView<>(origin, shape)));
}
TEST(BoxTest, Contains) {
const Index origin1[] = {1, 2};
const Index shape1[] = {4, 5};
const Index origin2[] = {2, 2};
const Index shape2[] = {3, 5};
const Index origin3[] = {1, 2};
const Index shape3[] = {4, 6};
const Index origin4[] = {1};
const Index shape4[] = {4};
const Index indices1[] = {2, 3};
const Index indices2[] = {0, 3};
const Index indices3[] = {0};
Index indices4[] = {2};
auto span1 = tensorstore::span(indices1);
auto span2 = tensorstore::span(indices2);
auto span3 = tensorstore::span(indices3);
auto span4 = tensorstore::span(indices4);
BoxView<> view1(origin1, shape1);
BoxView<> view2(origin2, shape2);
BoxView<> view3(origin3, shape3);
BoxView<> view4(origin4, shape4);
Box<> box1(origin1, shape1);
Box<> box2(origin2, shape2);
Box<> box3(origin3, shape3);
Box<> box4(origin4, shape4);
EXPECT_TRUE(Contains(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices4));
EXPECT_FALSE(Contains(view1, indices2));
EXPECT_FALSE(Contains(view1, indices3));
EXPECT_FALSE(ContainsPartial(view1, indices2));
EXPECT_FALSE(ContainsPartial(view1, indices3));
EXPECT_TRUE(Contains(view1, span1));
EXPECT_TRUE(ContainsPartial(view1, span1));
EXPECT_FALSE(Contains(view1, span2));
EXPECT_FALSE(ContainsPartial(view1, span2));
EXPECT_FALSE(Contains(view1, span3));
EXPECT_FALSE(ContainsPartial(view1, span3));
EXPECT_TRUE(ContainsPartial(view1, span4));
EXPECT_TRUE(Contains(box1, indices1));
EXPECT_TRUE(ContainsPartial(box1, indices1));
EXPECT_FALSE(Contains(box1, indices2));
EXPECT_FALSE(Contains(box1, indices3));
EXPECT_TRUE(Contains(box1, span1));
EXPECT_FALSE(Contains(box1, span2));
EXPECT_FALSE(Contains(box1, span3));
EXPECT_TRUE(Contains(view1, view2));
EXPECT_FALSE(Contains(view1, view3));
EXPECT_FALSE(Contains(view1, view4));
EXPECT_TRUE(Contains(view1, box2));
EXPECT_FALSE(Contains(view1, box3));
EXPECT_FALSE(Contains(view1, box4));
EXPECT_TRUE(Contains(box1, view2));
EXPECT_FALSE(Contains(box1, view3));
EXPECT_FALSE(Contains(box1, view4));
EXPECT_TRUE(Contains(box1, box2));
EXPECT_FALSE(Contains(box1, box3));
EXPECT_FALSE(Contains(box1, box4));
}
TEST(BoxTest, GetBoxDomainOf) {
static_assert(!HasBoxDomain<int>);
static_assert(HasBoxDomain<BoxView<>>);
static_assert(HasBoxDomain<Box<>>);
static_assert(HasBoxDomain<MutableBoxView<>>);
Box<> box({1, 2}, {3, 4});
BoxView<> view = box;
EXPECT_EQ(box, GetBoxDomainOf(box));
EXPECT_EQ(box, GetBoxDomainOf(view));
}
TEST(BoxTest, InlineSize) {
Box<dynamic_rank(2)> box({1, 2}, {3, 4});
BoxView<dynamic_rank> v = box;
EXPECT_EQ(v, box);
MutableBoxView<dynamic_rank> v2 = box;
EXPECT_EQ(v2, box);
}
TEST(BoxTest, DeductionGuides) {
auto box = Box({1, 2}, {3, 4});
static_assert(std::is_same_v<decltype(box), Box<2>>);
static_assert(std::is_same_v<decltype(BoxView({1, 2}, {3, 4})), BoxView<2>>);
static_assert(decltype(box)::static_rank == 2);
auto box_view = BoxView(box);
static_assert(std::is_same_v<decltype(box_view), MutableBoxView<2>>);
}
TEST(BoxTest, IsFinite) {
EXPECT_TRUE(IsFinite(Box<>()));
EXPECT_TRUE(IsFinite(BoxView<>()));
EXPECT_FALSE(IsFinite(Box<>(1)));
EXPECT_FALSE(IsFinite(Box<1>()));
EXPECT_FALSE(IsFinite(BoxView<>(1)));
EXPECT_FALSE(IsFinite(BoxView<>(2)));
EXPECT_FALSE(IsFinite(BoxView<2>()));
EXPECT_TRUE(IsFinite(Box<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<1>({1}, {4})));
EXPECT_FALSE(IsFinite(Box<3>({1, -kInfIndex, 3}, {4, 5, 6})));
EXPECT_FALSE(IsFinite(Box<3>({1, kInfIndex - 5, 3}, {4, 6, 6})));
}
TEST(BoxSerializationTest, StaticRank) {
TestSerializationRoundTrip(Box<0>());
TestSerializationRoundTrip(Box<3>({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxSerializationTest, DynamicRank) {
TestSerializationRoundTrip(Box<>());
TestSerializationRoundTrip(Box({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxTest, SubBoxView) {
Box<> b({1, 2, 3}, {4, 5, 6});
const Box<>& b_const = b;
BoxView<> b_view = b;
MutableBoxView<> b_mut_view = b;
EXPECT_EQ(Box<>({2, 3}, {5, 6}), SubBoxView(b, 1));
EXPECT_EQ(Box<>({2}, {5}), SubBoxView(b, 1, 2));
static_assert(std::is_same_v<decltype(SubBoxView(b, 1)), MutableBoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_const, 1)), BoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_view, 1)), BoxView<>>);
static_assert(
std::is_same_v<decltype(SubBoxView(b_mut_view, 1)), MutableBoxView<>>);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/box.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/box_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0ba8a671-bd6a-4936-ab30-7569bb8b140f | cpp | google/tensorstore | proto_binder | tensorstore/proto/proto_binder.cc | tensorstore/proto/proto_binder_test.cc | #include "tensorstore/proto/proto_binder.h"
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "google/protobuf/util/json_util.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
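// Per the json_binding convention, the `std::true_type` overloads load
// (parse JSON or text into the proto) and the `std::false_type` overloads
// save (serialize the proto back out).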
absl::Status JsonProtoBinderBase::operator()(std::true_type /*is_loading*/,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
if (!j->template get_ptr<::nlohmann::json::object_t*>()) {
return internal_json::ExpectedError(*j, "object");
}
std::string json_ascii = j->dump();
auto status = google::protobuf::util::JsonStringToMessage(json_ascii, obj);
if (status.ok()) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected JSON protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", j->dump(), " with error ",
std::string_view(status.message().data(), status.message().size())));
}
absl::Status JsonProtoBinderBase::operator()(std::false_type /*is_loading*/,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string json_ascii;
auto status = google::protobuf::util::MessageToJsonString(*obj, &json_ascii);
if (!status.ok()) {
return absl::InternalError(
std::string_view(status.message().data(), status.message().size()));
}
auto j_parse = ::nlohmann::json::parse(json_ascii, nullptr, false);
if (j_parse.template get_ptr<::nlohmann::json::object_t*>()) {
*j = std::move(j_parse);
return absl::OkStatus();
}
return absl::InternalError("Failed to serialize field as JSON proto");
}
absl::Status AsciiProtoBinderBase::operator()(std::true_type,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
auto* str = j->template get_ptr<const std::string*>();
if (!str) {
return internal_json::ExpectedError(*j, "string");
}
if (TryParseTextProto(*str, obj)) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ASCII protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", *str));
}
absl::Status AsciiProtoBinderBase::operator()(std::false_type,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string obj_text;
google::protobuf::TextFormat::PrintToString(*obj, &obj_text);
*j = obj_text;
return absl::OkStatus();
}
}
} | #include "tensorstore/proto/proto_binder.h"
#include <string>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::JsonSerializationOptions;
using ::tensorstore::internal_json_binding::AsciiProtoBinder;
using ::tensorstore::internal_json_binding::JsonProtoBinder;
static inline constexpr JsonProtoBinder<::tensorstore::proto::Array>
ArrayJsonBinder = {};
static inline constexpr AsciiProtoBinder<::tensorstore::proto::Array>
ArrayAsciiBinder = {};
constexpr const char kProto[] = R"(dtype: "int64"
shape: 1
shape: 2
shape: 4
int_data: 1
int_data: 0
int_data: 2
int_data: 2
int_data: 4
int_data: 5
int_data: 6
int_data: 7
)";
TEST(ProtoBinderTest, Ascii) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = std::string(kProto);
EXPECT_TRUE(ArrayAsciiBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayAsciiBinder(std::false_type{}, options, &proto, &out).ok());
ASSERT_TRUE(out.get_ptr<const std::string*>());
EXPECT_EQ(*out.get_ptr<const std::string*>(), kProto);
}
TEST(ProtoBinderTest, Json) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = ::nlohmann::json{{"dtype", "int64"},
{"shape", {1, 2, 4}},
{"int_data", {1, 0, 2, 2, 4, 5, 6, 7}}};
EXPECT_TRUE(ArrayJsonBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayJsonBinder(std::false_type{}, options, &proto, &out).ok());
::nlohmann::json expected{
{"dtype", "int64"},
{"shape", {"1", "2", "4"}},
{"intData", {"1", "0", "2", "2", "4", "5", "6", "7"}}};
EXPECT_EQ(out, expected);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_binder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_binder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
37cc81f3-453c-4552-97e5-0cd53e708bdd | cpp | google/tensorstore | proto_util | tensorstore/proto/proto_util.cc | tensorstore/proto/proto_util_test.cc | #include "tensorstore/proto/proto_util.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "google/protobuf/io/tokenizer.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
namespace tensorstore {
namespace {
class ErrorCollector : public google::protobuf::io::ErrorCollector {
public:
ErrorCollector() = default;
~ErrorCollector() override = default;
void RecordError(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
void RecordWarning(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
std::vector<std::string> errors;
};
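// Elides long string fields in debug output: strings over 80 bytes print as
// "<N bytes: \xAA\xBB...>" showing only the first 8 bytes.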
class ConcisePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter {
public:
void PrintString(
const std::string& val,
google::protobuf::TextFormat::BaseTextGenerator* generator) const override {
if (val.size() <= 80) {
FastFieldValuePrinter::PrintString(val, generator);
return;
}
std::string output = absl::StrFormat("<%d bytes: ", val.size());
for (size_t i = 0; i < 8; i++) {
absl::StrAppendFormat(&output, "\\x%02x", val[i]);
}
absl::StrAppend(&output, "...>");
generator->PrintString(output);
}
};
}
bool TryParseTextProto(absl::string_view asciipb, google::protobuf::Message* msg,
std::vector<std::string>* errors,
bool allow_partial_messages,
bool allow_unknown_extensions) {
google::protobuf::TextFormat::Parser parser;
parser.AllowPartialMessage(allow_partial_messages);
parser.AllowUnknownExtension(allow_unknown_extensions);
ErrorCollector error_collector;
parser.RecordErrorsTo(&error_collector);
google::protobuf::io::ArrayInputStream asciipb_istream(asciipb.data(), asciipb.size());
if (parser.Parse(&asciipb_istream, msg)) {
return true;
}
msg->Clear();
if (errors) {
*errors = std::move(error_collector.errors);
}
return false;
}
std::string ConciseDebugString(const google::protobuf::Message& message) {
google::protobuf::TextFormat::Printer printer;
printer.SetDefaultFieldValuePrinter(new ConcisePrinter());
printer.SetSingleLineMode(true);
printer.SetExpandAny(true);
std::string debugstring;
printer.PrintToString(message, &debugstring);
if (!debugstring.empty() && debugstring.back() == ' ') {
debugstring.pop_back();
}
return debugstring;
}
} | #include "tensorstore/proto/proto_util.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::ConciseDebugString;
using ::tensorstore::TryParseTextProto;
TEST(ProtoUtilTest, Basic) {
constexpr const char kProto[] = R"pb(
dtype: "int64"
shape: [ 1, 2, 4 ]
int_data: [ 1, 0, 2, 2, 4, 5, 6, 7 ]
)pb";
::tensorstore::proto::Array proto;
EXPECT_TRUE(TryParseTextProto(kProto, &proto));
EXPECT_THAT(proto, EqualsProto(kProto));
std::vector<std::string> errors;
EXPECT_FALSE(TryParseTextProto("a: 'foo'", &proto, &errors));
EXPECT_FALSE(errors.empty());
}
TEST(ProtoUtilTest, ConciseDebugString) {
::tensorstore::proto::Array proto;
proto.set_dtype("int64");
proto.set_void_data(
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}");
EXPECT_EQ(
"dtype: \"int64\" "
"void_data: <256 bytes: \\x7b\\x30\\x31\\x32\\x33\\x34\\x35\\x36...>",
ConciseDebugString(proto));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3a36959a-e048-406a-8180-3a5c2b174527 | cpp | google/tensorstore | encode_time | tensorstore/proto/encode_time.cc | tensorstore/proto/encode_time_test.cc | #include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
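// Infinite times cannot be represented by google.protobuf.Timestamp, so
// InfiniteFuture/InfinitePast are encoded as the sentinel seconds values
// INT64_MAX and INT64_MIN, which lie outside the proto's valid range.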
void AbslTimeToProto(absl::Time t, google::protobuf::Timestamp* proto) {
if (t == absl::InfiniteFuture()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (t == absl::InfinitePast()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::ToUnixSeconds(t);
const int64_t n = (t - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
tensorstore::Result<absl::Time> ProtoToAbslTime(
const google::protobuf::Timestamp& proto) {
const auto sec = proto.seconds();
const auto ns = proto.nanos();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteFuture();
}
if (sec == 0x8000000000000000ll) {
return absl::InfinitePast();
}
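  // google.protobuf.Timestamp limits seconds to [0001-01-01T00:00:00Z,
  // 9999-12-31T23:59:59Z], i.e. [-62135596800, 253402300799].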
if (sec < -62135596800 || sec > 253402300799) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < 0 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
return absl::FromUnixSeconds(sec) + absl::Nanoseconds(ns);
}
void AbslDurationToProto(absl::Duration d, google::protobuf::Duration* proto) {
if (d == absl::InfiniteDuration()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (d == -absl::InfiniteDuration()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::IDivDuration(d, absl::Seconds(1), &d);
const int64_t n = absl::IDivDuration(d, absl::Nanoseconds(1), &d);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
Result<absl::Duration> ProtoToAbslDuration(
const google::protobuf::Duration& proto) {
const auto sec = proto.seconds();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteDuration();
}
if (sec == 0x8000000000000000ll) {
return -absl::InfiniteDuration();
}
const auto ns = proto.nanos();
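  // google.protobuf.Duration limits seconds to roughly +/-10,000 years
  // (+/-315576000000) and requires seconds and nanos to agree in sign.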
if (sec < -315576000000 || sec > 315576000000) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < -999999999 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
if ((sec < 0 && ns > 0) || (sec > 0 && ns < 0)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Sign mismatch between seconds=", sec, ", nanos=", ns));
}
return absl::Seconds(sec) + absl::Nanoseconds(ns);
}
}
} | #include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include <gtest/gtest.h>
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal::AbslDurationToProto;
using ::tensorstore::internal::AbslTimeToProto;
using ::tensorstore::internal::ProtoToAbslDuration;
using ::tensorstore::internal::ProtoToAbslTime;
TEST(EncodeTimestamp, Basic) {
auto roundtrip = [](absl::Time ts) {
google::protobuf::Timestamp proto;
AbslTimeToProto(ts, &proto);
return ProtoToAbslTime(proto);
};
tensorstore::Result<absl::Time> result;
result = roundtrip(absl::InfinitePast());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfinitePast(), *result);
result = roundtrip(absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfiniteFuture(), *result);
auto now = absl::Now();
result = roundtrip(now);
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(now, *result);
}
TEST(EncodeDuration, Basic) {
auto roundtrip = [](absl::Duration d) {
google::protobuf::Duration proto;
AbslDurationToProto(d, &proto);
return ProtoToAbslDuration(proto);
};
auto test_roundtrip = [&](absl::Duration d) {
SCOPED_TRACE(tensorstore::StrCat("duration=", d));
EXPECT_THAT(roundtrip(d), ::testing::Optional(d));
};
test_roundtrip(absl::InfiniteDuration());
test_roundtrip(-absl::InfiniteDuration());
test_roundtrip(absl::Seconds(5));
test_roundtrip(absl::Seconds(-5));
test_roundtrip(absl::ZeroDuration());
test_roundtrip(absl::Milliseconds(12345));
test_roundtrip(absl::Milliseconds(-12345));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/encode_time.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/encode_time_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ed05a639-8611-4b50-bf09-f303ce4b0779 | cpp | google/tensorstore | index_transform | tensorstore/index_space/index_transform.cc | tensorstore/index_space/index_transform_test.cc | #include "tensorstore/index_space/index_transform.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <numeric>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/json.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
std::string DescribeTransformForCast(DimensionIndex input_rank,
DimensionIndex output_rank) {
return tensorstore::StrCat(
"index transform with input ",
StaticCastTraits<DimensionIndex>::Describe(input_rank), " and output ",
StaticCastTraits<DimensionIndex>::Describe(output_rank));
}
std::string DescribeDomainForCast(DimensionIndex rank) {
return tensorstore::StrCat("index domain with ",
StaticCastTraits<DimensionIndex>::Describe(rank));
}
Result<IndexTransform<>> SliceByIndexDomain(IndexTransform<> transform,
IndexDomainView<> domain) {
using internal_index_space::TransformAccess;
assert(transform.valid());
assert(domain.valid());
TransformRep::Ptr<> rep =
MutableRep(TransformAccess::rep_ptr<container>(std::move(transform)));
const DimensionIndex slice_rank = domain.rank();
const DimensionIndex input_rank = rep->input_rank;
const span<const std::string> domain_labels = domain.labels();
const span<std::string> transform_labels =
rep->input_labels().first(input_rank);
DimensionIndex transform_dims[kMaxRank];
const bool domain_unlabeled =
internal_index_space::IsUnlabeled(domain_labels);
if (domain_unlabeled || internal_index_space::IsUnlabeled(transform_labels)) {
if (slice_rank != input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank of index domain (", slice_rank,
") must match rank of slice target (", input_rank,
") when the index domain or slice target is unlabeled"));
}
std::iota(&transform_dims[0], &transform_dims[slice_rank],
DimensionIndex(0));
if (!domain_unlabeled) {
std::copy_n(domain_labels.begin(), slice_rank, transform_labels.begin());
}
} else {
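    // Match each labeled domain dimension to the transform dimension with
    // the same label; unlabeled domain dimensions are assigned, in order, to
    // the remaining unlabeled transform dimensions.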
DimensionIndex next_potentially_unlabeled_dim = 0;
for (DimensionIndex i = 0; i < slice_rank; ++i) {
std::string_view label = domain_labels[i];
DimensionIndex j;
if (!label.empty()) {
TENSORSTORE_ASSIGN_OR_RETURN(
j, NormalizeDimensionLabel(label, transform_labels));
} else {
while (true) {
if (next_potentially_unlabeled_dim == input_rank) {
return absl::InvalidArgumentError(
"Number of unlabeled dimensions in index domain exceeds number "
"of unlabeled dimensions in slice target");
}
if (transform_labels[next_potentially_unlabeled_dim].empty()) {
j = next_potentially_unlabeled_dim++;
break;
}
++next_potentially_unlabeled_dim;
}
}
transform_dims[i] = j;
}
if (next_potentially_unlabeled_dim != 0 && input_rank != slice_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank (", slice_rank,
") of index domain containing unlabeled dimensions must "
"equal slice target rank (",
input_rank, ")"));
}
}
bool domain_is_empty = false;
for (DimensionIndex i = 0; i < slice_rank; ++i) {
const DimensionIndex j = transform_dims[i];
const internal_index_space::InputDimensionRef d = rep->input_dimension(j);
const IndexInterval orig_domain =
d.optionally_implicit_domain().effective_interval();
const IndexInterval new_domain = domain[i];
if (!Contains(orig_domain, new_domain)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Cannot slice target dimension ", j, " {",
d.index_domain_dimension<view>(), "} with index domain dimension ", i,
" {", domain[i], "}"));
}
if (new_domain.empty()) domain_is_empty = true;
d.domain() = new_domain;
d.implicit_lower_bound() = false;
d.implicit_upper_bound() = false;
}
if (domain_is_empty) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
Result<IndexTransform<>> SliceByBox(IndexTransform<> transform,
BoxView<> domain) {
using internal_index_space::TransformAccess;
assert(transform.valid());
if (transform.input_rank() != domain.rank()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Rank of index domain (", transform.input_rank(),
") must match rank of box (", domain.rank(), ")"));
}
TransformRep::Ptr<> rep =
MutableRep(TransformAccess::rep_ptr<container>(std::move(transform)));
bool domain_is_empty = false;
for (DimensionIndex i = 0; i < domain.rank(); ++i) {
const internal_index_space::InputDimensionRef d = rep->input_dimension(i);
const IndexInterval orig_domain =
d.optionally_implicit_domain().effective_interval();
const IndexInterval new_domain = domain[i];
if (new_domain.empty()) domain_is_empty = true;
if (!Contains(orig_domain, new_domain)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Cannot slice dimension ", i, " {", d.index_domain_dimension<view>(),
"} with interval {", domain[i], "}"));
}
d.domain() = new_domain;
d.implicit_lower_bound() = false;
d.implicit_upper_bound() = false;
}
if (domain_is_empty) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
Result<IndexDomain<>> SliceByBox(IndexDomain<> domain, BoxView<> box) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto transform, internal_index_space::SliceByBox(
TransformAccess::transform(std::move(domain)), box));
return std::move(transform).domain();
}
}
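// Computes a bounding box for the range of `transform`.  The returned bool
// indicates whether the box is exact: any index-array map, stride outside
// {-1, 0, 1}, or input dimension used by more than one output dimension
// forces a conservative (possibly over-approximate) result.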
Result<bool> GetOutputRange(IndexTransformView<> transform,
MutableBoxView<> output_range) {
assert(output_range.rank() == transform.output_rank());
DimensionSet input_dim_used;
bool exact = true;
for (DimensionIndex output_dim = 0, output_rank = transform.output_rank();
output_dim < output_rank; ++output_dim) {
const auto output_index_map = transform.output_index_map(output_dim);
const OutputIndexMethod method = output_index_map.stride() == 0
? OutputIndexMethod::constant
: output_index_map.method();
switch (method) {
case OutputIndexMethod::constant: {
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
IndexInterval::Sized(output_index_map.offset(), 1));
break;
}
case OutputIndexMethod::single_input_dimension: {
const Index stride = output_index_map.stride();
if (stride < -1 || stride > 1) exact = false;
const DimensionIndex input_dim = output_index_map.input_dimension();
if (input_dim_used[input_dim]) {
exact = false;
} else {
input_dim_used[input_dim] = true;
}
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
GetAffineTransformRange(transform.input_domain()[input_dim],
output_index_map.offset(), stride));
break;
}
case OutputIndexMethod::array: {
exact = false;
const auto index_array_ref = output_index_map.index_array();
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
GetAffineTransformRange(index_array_ref.index_range(),
output_index_map.offset(),
output_index_map.stride()));
break;
}
}
}
return exact;
}
namespace internal_index_space {
absl::Status ValidateInputDimensionResize(
OptionallyImplicitIndexInterval input_domain, Index requested_inclusive_min,
Index requested_exclusive_max) {
if (requested_inclusive_min != kImplicit &&
requested_inclusive_min != -kInfIndex &&
!IsFiniteIndex(requested_inclusive_min)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested inclusive min value ", requested_inclusive_min));
}
if (requested_exclusive_max != kImplicit &&
requested_exclusive_max != kInfIndex + 1 &&
!IsFiniteIndex(requested_exclusive_max - 1)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested exclusive max value ", requested_exclusive_max));
}
if (requested_inclusive_min != kImplicit &&
requested_exclusive_max != kImplicit &&
requested_inclusive_min > requested_exclusive_max) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested bounds [", requested_inclusive_min, ", ",
requested_exclusive_max, ")"));
}
if (!input_domain.implicit_lower() && requested_inclusive_min != kImplicit) {
return absl::InvalidArgumentError("Cannot change explicit lower bound");
}
if (!input_domain.implicit_upper() && requested_exclusive_max != kImplicit) {
return absl::InvalidArgumentError("Cannot change explicit upper bound");
}
return absl::OkStatus();
}
}
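// Translates a requested resize of the input domain into new bounds for the
// affected output dimensions, and records the bounds that unaffected output
// dimensions must already satisfy (unless `can_resize_tied_bounds` waives
// those consistency checks).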
absl::Status PropagateInputDomainResizeToOutput(
IndexTransformView<> transform,
span<const Index> requested_input_inclusive_min,
span<const Index> requested_input_exclusive_max,
bool can_resize_tied_bounds, span<Index> output_inclusive_min_constraint,
span<Index> output_exclusive_max_constraint,
span<Index> new_output_inclusive_min, span<Index> new_output_exclusive_max,
bool* is_noop) {
assert(transform.valid());
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
assert(requested_input_inclusive_min.size() == transform.input_rank());
assert(requested_input_exclusive_max.size() == transform.input_rank());
assert(output_inclusive_min_constraint.size() == transform.output_rank());
assert(output_exclusive_max_constraint.size() == transform.output_rank());
assert(new_output_inclusive_min.size() == transform.output_rank());
assert(new_output_exclusive_max.size() == transform.output_rank());
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::ValidateInputDimensionResize(
transform.input_domain()[input_dim],
requested_input_inclusive_min[input_dim],
requested_input_exclusive_max[input_dim]),
MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Invalid resize request for input dimension ", input_dim)));
}
bool is_noop_value = true;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
output_inclusive_min_constraint[output_dim] = kImplicit;
output_exclusive_max_constraint[output_dim] = kImplicit;
new_output_inclusive_min[output_dim] = kImplicit;
new_output_exclusive_max[output_dim] = kImplicit;
const auto map = transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const DimensionIndex input_dim = map.input_dimension();
const Index requested_min = requested_input_inclusive_min[input_dim];
const Index requested_max = requested_input_exclusive_max[input_dim];
if (requested_min != kImplicit || requested_max != kImplicit) {
is_noop_value = false;
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" depends on resized input dimension ", input_dim,
" with non-unit stride of ", map.stride()));
}
Result<OptionallyImplicitIndexInterval> output_bounds =
GetAffineTransformRange(
{IndexInterval::UncheckedHalfOpen(
requested_min == kImplicit ? -kInfIndex : requested_min,
requested_max == kImplicit ? kInfIndex + 1 : requested_max),
requested_min == kImplicit, requested_max == kImplicit},
map.offset(), map.stride());
if (!output_bounds) {
return MaybeAnnotateStatus(
output_bounds.status(),
tensorstore::StrCat(
"Error propagating bounds for output dimension ", output_dim,
" from requested bounds for input dimension ", input_dim));
}
if (!output_bounds->implicit_lower()) {
new_output_inclusive_min[output_dim] = output_bounds->inclusive_min();
}
if (!output_bounds->implicit_upper()) {
new_output_exclusive_max[output_dim] = output_bounds->exclusive_max();
}
}
}
*is_noop = is_noop_value;
if (is_noop_value) return absl::OkStatus();
DimensionIndex num_input_dim_deps[kMaxRank];
std::fill_n(num_input_dim_deps, input_rank, static_cast<DimensionIndex>(0));
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto map = transform.output_index_map(output_dim);
switch (map.method()) {
case OutputIndexMethod::constant:
if (!IsFiniteIndex(map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" has constant map with invalid offset ", map.offset()));
}
if (!can_resize_tied_bounds) {
output_inclusive_min_constraint[output_dim] = map.offset();
output_exclusive_max_constraint[output_dim] = map.offset() + 1;
}
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
if (!can_resize_tied_bounds) {
if (num_input_dim_deps[input_dim]++ != 0) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Input dimension ", input_dim,
" corresponds to a diagonal but "
"`resize_tied_bounds` was not specified"));
}
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim, " depends on input dimension ",
input_dim, " with non-unit stride of ", map.stride(),
" but `resize_tied_bounds` was not specified"));
}
Result<OptionallyImplicitIndexInterval> output_bounds =
GetAffineTransformRange(transform.input_domain()[input_dim],
map.offset(), map.stride());
if (!output_bounds) {
return MaybeAnnotateStatus(
output_bounds.status(),
tensorstore::StrCat(
"Error propagating bounds for output dimension ",
output_dim, " from existing bounds for input dimension ",
input_dim));
}
if (!output_bounds->implicit_lower()) {
output_inclusive_min_constraint[output_dim] =
output_bounds->inclusive_min();
}
if (!output_bounds->implicit_upper()) {
output_exclusive_max_constraint[output_dim] =
output_bounds->exclusive_max();
}
}
break;
}
case OutputIndexMethod::array:
if (!can_resize_tied_bounds) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" has index array map but `resize_tied_bounds` was "
"not specified"));
}
break;
}
}
return absl::OkStatus();
}
namespace {
template <typename MergeFn>
inline Result<IndexDomain<>> MergeIndexDomainsImpl(IndexDomainView<> a,
IndexDomainView<> b,
MergeFn merge) {
if (!a.valid()) return b;
if (!b.valid()) return a;
if (a.rank() != b.rank()) {
return absl::InvalidArgumentError("Ranks do not match");
}
const DimensionIndex rank = a.rank();
auto new_rep = internal_index_space::TransformRep::Allocate(rank, 0);
new_rep->input_rank = rank;
new_rep->output_rank = 0;
const auto a_labels = a.labels();
const auto b_labels = b.labels();
for (DimensionIndex i = 0; i < rank; ++i) {
auto status = [&] {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_label, MergeDimensionLabels(a_labels[i], b_labels[i]));
TENSORSTORE_ASSIGN_OR_RETURN(auto new_bounds, merge(a[i], b[i]));
new_rep->input_dimension(i) =
IndexDomainDimension<view>(new_bounds, new_label);
return absl::OkStatus();
}();
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(
status, tensorstore::StrCat("Mismatch in dimension ", i));
}
}
internal_index_space::DebugCheckInvariants(new_rep.get());
return internal_index_space::TransformAccess::Make<IndexDomain<>>(
std::move(new_rep));
}
}
Result<IndexDomain<>> MergeIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result =
MergeIndexDomainsImpl(a, b, MergeOptionallyImplicitIndexIntervals);
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot merge index domain ", a,
" with index domain ", b));
}
return result;
}
Result<IndexDomain<>> HullIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b)
-> Result<OptionallyImplicitIndexInterval> { return Hull(a, b); });
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot hull index domain ", a,
" with index domain ", b));
}
return result;
}
Result<IndexDomain<>> IntersectIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b)
-> Result<OptionallyImplicitIndexInterval> {
return Intersect(a, b);
});
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot intersect index domain ",
a, " with index domain ", b));
}
return result;
}
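// Unlike IntersectIndexDomains, a bound of `a` is replaced by the
// corresponding bound of `b` only when it is both implicit and infinite;
// all other bounds of `a` are kept as-is.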
Result<IndexDomain<>> ConstrainIndexDomain(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval ai, OptionallyImplicitIndexInterval bi)
-> Result<OptionallyImplicitIndexInterval> {
const bool constrain_lower =
ai.implicit_lower() && ai.inclusive_min() == -kInfIndex;
const bool constrain_upper =
ai.implicit_upper() && ai.inclusive_max() == kInfIndex;
return OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(
constrain_lower ? bi.inclusive_min() : ai.inclusive_min(),
constrain_upper ? bi.inclusive_max() : ai.inclusive_max()),
constrain_lower ? bi.implicit_lower() : ai.implicit_lower(),
constrain_upper ? bi.implicit_upper() : ai.implicit_upper()};
});
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot constrain index domain ",
a, " with index domain ", b));
}
return result;
}
namespace internal {
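// Classifies the input dimensions of `transform`.  An input dimension is
// one-to-one if it is referenced by exactly one single_input_dimension
// output map (with stride +/-1 when `require_unit_stride` is set) and by
// no index array map.  Input dimensions referenced by no output map appear
// in neither returned set.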
OneToOneInputDimensions GetOneToOneInputDimensions(
IndexTransformView<> transform, bool require_unit_stride) {
DimensionSet non_one_to_one_input_dims;
DimensionSet seen_input_dims;
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const Index stride = map.stride();
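        // With `require_unit_stride`, only strides of +/-1 qualify;
        // otherwise any stride is accepted except
        // std::numeric_limits<Index>::min(), presumably excluded because
        // its negation/magnitude is not representable as an Index.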
if (require_unit_stride ? (stride != 1 && stride != -1)
: stride == std::numeric_limits<Index>::min()) {
non_one_to_one_input_dims[input_dim] = true;
break;
}
if (seen_input_dims[input_dim]) {
non_one_to_one_input_dims[input_dim] = true;
break;
}
seen_input_dims[input_dim] = true;
break;
}
case OutputIndexMethod::array: {
const auto index_array = map.index_array();
for (DimensionIndex input_dim = 0; input_dim < input_rank;
++input_dim) {
if (index_array.byte_strides()[input_dim] != 0) {
non_one_to_one_input_dims[input_dim] = true;
}
}
break;
}
}
}
return {seen_input_dims & ~non_one_to_one_input_dims,
non_one_to_one_input_dims};
}
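// Counts, for each input dimension, how many output index maps reference
// it: a single_input_dimension map contributes 1 to its input dimension,
// and an index array map contributes 1 to every input dimension along
// which the index array has a non-zero byte stride.  Constant maps
// contribute nothing.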
void ComputeInputDimensionReferenceCounts(
IndexTransformView<> transform,
span<DimensionIndex> input_dimension_reference_counts) {
using internal_index_space::TransformAccess;
assert(transform.valid());
const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_dimension_reference_counts.size() == input_rank);
std::fill_n(input_dimension_reference_counts.begin(), input_rank,
DimensionIndex(0));
auto transform_rep = TransformAccess::rep(transform);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& output_map = transform_rep->output_index_maps()[output_dim];
switch (output_map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension:
++input_dimension_reference_counts[output_map.input_dimension()];
break;
case OutputIndexMethod::array: {
const auto& index_array_data = output_map.index_array_data();
for (DimensionIndex input_dim = 0; input_dim < input_rank;
++input_dim) {
if (index_array_data.byte_strides[input_dim] != 0) {
++input_dimension_reference_counts[input_dim];
}
}
break;
}
}
}
}
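// Returns the set of input dimensions that output dimension `output_dim`
// depends on, plus a flag indicating whether any of that dependence goes
// through an index array map.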
std::pair<DimensionSet, bool> GetInputDimensionsForOutputDimension(
IndexTransformView<> transform, DimensionIndex output_dim) {
DimensionSet input_dims;
bool has_array_dependence = false;
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
input_dims[map.input_dimension()] = true;
break;
}
case OutputIndexMethod::array: {
const auto index_array = map.index_array();
const DimensionIndex input_rank = transform.input_rank();
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
if (index_array.byte_strides()[input_dim] != 0) {
input_dims[input_dim] = true;
has_array_dependence = true;
}
}
break;
}
}
return {input_dims, has_array_dependence};
}
}  // namespace internal
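// Composes two possibly-null transforms, treating an invalid transform as
// the identity: if either argument is invalid the other is returned;
// otherwise this is ComposeTransforms(b_to_c, a_to_b).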
Result<IndexTransform<>> ComposeOptionalTransforms(IndexTransform<> b_to_c,
IndexTransform<> a_to_b) {
if (!b_to_c.valid()) return a_to_b;
if (!a_to_b.valid()) return b_to_c;
return ComposeTransforms(std::move(b_to_c), std::move(a_to_b));
}
namespace internal_index_space {
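// Serialization round-trips through the JSON representation: Encode writes
// ::nlohmann::json(value), and Decode re-parses it with
// ParseIndexTransformFromJson / ParseIndexDomainFromJson under the stored
// rank constraints.  The *NonNull* serializers handle only valid values;
// the outer serializers wrap them in MaybeNullSerializer so that null
// (invalid) transforms and domains round-trip as well.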
bool IndexTransformNonNullSerializer::Encode(serialization::EncodeSink& sink,
IndexTransformView<> value) {
return serialization::Encode(sink, ::nlohmann::json(value));
}
bool IndexTransformNonNullSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
::nlohmann::json json;
if (!serialization::Decode(source, json)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
value,
internal_index_space::ParseIndexTransformFromJson(
json, input_rank_constraint, output_rank_constraint),
(source.Fail(_), false));
return true;
}
bool IndexTransformSerializer::Encode(serialization::EncodeSink& sink,
IndexTransformView<> value) {
return serialization::MaybeNullSerializer<IndexTransformView<>,
IndexTransformNonNullSerializer,
serialization::IsValid>()
.Encode(sink, value);
}
bool IndexTransformSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
return serialization::MaybeNullSerializer<
internal_index_space::TransformRep::Ptr<>,
IndexTransformNonNullSerializer>{
IndexTransformNonNullSerializer{input_rank_constraint,
output_rank_constraint}}
.Decode(source, value);
}
bool IndexDomainNonNullSerializer::Encode(serialization::EncodeSink& sink,
IndexDomainView<> value) {
return serialization::Encode(sink, ::nlohmann::json(value));
}
bool IndexDomainNonNullSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
::nlohmann::json json;
if (!serialization::Decode(source, json)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
value,
internal_index_space::ParseIndexDomainFromJson(json, rank_constraint),
(source.Fail(_), false));
return true;
}
bool IndexDomainSerializer::Encode(serialization::EncodeSink& sink,
IndexDomainView<> value) {
return serialization::MaybeNullSerializer<IndexDomainView<>,
IndexDomainNonNullSerializer,
serialization::IsValid>()
.Encode(sink, value);
}
bool IndexDomainSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
return serialization::MaybeNullSerializer<
internal_index_space::TransformRep::Ptr<>,
IndexDomainNonNullSerializer>{
IndexDomainNonNullSerializer{rank_constraint}}
.Decode(source, value);
}
}  // namespace internal_index_space
} | #include "tensorstore/index_space/index_transform.h"
#include <array>
#include <string_view>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::HullIndexDomains;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexDomainDimension;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::IntersectIndexDomains;
using ::tensorstore::IsIndexDomain;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeIndexDomains;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StaticCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::StrCat;
using ::tensorstore::unchecked;
using ::tensorstore::view;
using ::tensorstore::internal::ComputeInputDimensionReferenceCounts;
using ::tensorstore::internal::GetInputDimensionsForOutputDimension;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(IndexTransformTest, Equality) {
EXPECT_EQ(IndexTransform<>(), IndexTransform<>());
EXPECT_EQ(IndexTransformBuilder<>(2, 3).Finalize().value(),
IndexTransformBuilder<>(2, 3).Finalize().value());
EXPECT_NE(IndexTransformBuilder<>(2, 3).Finalize().value(),
IndexTransformBuilder<>(2, 2).Finalize().value());
EXPECT_NE(IndexTransformBuilder<>(3, 2).Finalize().value(),
IndexTransformBuilder<>(2, 2).Finalize().value());
EXPECT_EQ(
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value(),
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value());
EXPECT_NE(
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value(),
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 3}).Finalize().value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 2})
.input_shape({3, 4, 5})
.Finalize()
.value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.input_labels({"x", "y", "z"})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_labels({"x", "y", "z"})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_labels({"a", "b", "c"})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_labels({"a", "b", "d"})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2).Finalize().value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 0)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 0)
.Finalize()
.value());
EXPECT_EQ(
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value(),
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value());
EXPECT_NE(
IndexTransformBuilder<>(3, 2).output_constant(0, 1).Finalize().value(),
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 1)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 1}}}))
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}))
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 5))
.Finalize()
.value());
}
TEST(IndexTransformTest, ImplicitConversion) {
IndexTransform<2, 2> t = IdentityTransform<2>();
IndexTransform<> t_labeled = t;
EXPECT_EQ(IdentityTransform(2), t_labeled);
}
TEST(IndexTransformTest, Assign) {
auto make_labeled_transform = [] {
return IndexTransformBuilder<3, 3>()
.input_origin({0, 1, 2})
.input_shape({2, 2, 3})
.input_labels({"x", "y", "z"})
.output_index_array(0, 1, 4, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.output_single_input_dimension(1, 2, 5, 1)
.output_constant(2, 3)
.Finalize()
.value();
};
auto make_transform = [] {
return IndexTransformBuilder<3, 3>()
.input_origin({0, 1, 2})
.input_shape({2, 2, 3})
.output_index_array(0, 1, 4, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.output_single_input_dimension(1, 2, 5, 1)
.output_constant(2, 3)
.Finalize()
.value();
};
auto make_identity = [] { return IdentityTransform(2); };
auto make_labeled_identity = [] {
return IdentityTransform(span<const std::string_view>({"x", "y"}));
};
auto unlabeled_t = make_identity();
{
auto unlabeled_t2 = make_identity();
unlabeled_t2 = make_transform();
auto* rep_t2 = TransformAccess::rep(unlabeled_t2);
unlabeled_t = std::move(unlabeled_t2);
EXPECT_EQ(rep_t2, TransformAccess::rep(unlabeled_t));
EXPECT_EQ(nullptr, TransformAccess::rep(unlabeled_t2));
}
unlabeled_t = make_transform();
EXPECT_EQ(make_transform(), unlabeled_t);
unlabeled_t = IndexTransform<2, 2>();
EXPECT_FALSE(unlabeled_t.valid());
auto labeled_t = make_labeled_identity();
labeled_t = make_labeled_transform();
EXPECT_EQ(make_labeled_transform(), labeled_t);
{
auto labeled_t2 = make_labeled_transform();
labeled_t = labeled_t2;
EXPECT_EQ(labeled_t, make_labeled_transform());
labeled_t = make_labeled_identity();
}
{
auto labeled_t3 = make_labeled_identity();
labeled_t3 = make_labeled_transform();
labeled_t = labeled_t3;
EXPECT_EQ(make_labeled_transform(), labeled_t);
}
{
IndexTransform<2, 2> invalid_t;
labeled_t = invalid_t;
EXPECT_FALSE(labeled_t.valid());
}
}
TEST(IndexTransformTest, ToString) {
EXPECT_EQ("<Invalid index space transform>",
StrCat(IndexTransformView<1, 1>()));
EXPECT_EQ(
R"s(Rank 3 -> 4 index space transform:
Input domain:
0: [1*, 3) "x"
1: [2, 4*) "y"
2: [3, 7) "z"
Output index maps:
out[0] = 4
out[1] = 5 + 7 * in[2]
out[2] = 6
out[3] = 7 + 9 * bounded([0, 4), array(in)), where array =
{{{1, 0, 2, 2}}}
)s",
StrCat(IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9,
MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize()
.value()));
}
TEST(IndexTransformTest, GTestToString) {
EXPECT_EQ(
R"s(Rank 3 -> 4 index space transform:
Input domain:
0: [1, 3) "x"
1: [2, 4) "y"
2: [3, 7) "z"
Output index maps:
out[0] = 4
out[1] = 5 + 7 * in[2]
out[2] = 6
out[3] = 7 + 9 * bounded([0, 4), array(in)), where array =
{{{1, 0, 2, 2}}}
)s",
::testing::PrintToString(
IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize()
.value()));
}
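// The next few tests exercise IndexTransform::TransformIndices for each
// output index method: a constant map ignores the inputs, a
// single_input_dimension map computes offset + stride * input, and an
// index array map computes offset + stride * array(input).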
TEST(IndexTransformTest, Constant) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({4})
.output_constant(0, 10)
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(10));
}
TEST(IndexTransformTest, SingleInputDimension) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({20})
.output_single_input_dimension(0, 5, 2, 0)
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({6}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 6));
}
TEST(IndexTransformTest, IndexArray) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 5, 2, MakeArray<Index>({4, 5, 6}))
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({1}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 4));
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({2}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 5));
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 6));
}
TEST(TransformIndicesTest, ConstantAndSingleInputDimensionAndIndexArray) {
auto t = IndexTransformBuilder<>(3, 3)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3,
MakeArray<Index>({{{5}, {6}, {7}, {8}}}))
.Finalize()
.value();
std::array<Index, 3> output_indices;
ASSERT_EQ(
absl::OkStatus(),
t.TransformIndices(span<const Index, 3>({2, 4, 5}), output_indices));
EXPECT_THAT(output_indices,
::testing::ElementsAre(10, 20 + 2 * 5, 30 + 3 * 7));
}
TEST(TransformIndicesTest, Implicit) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.implicit_lower_bounds({1})
.input_shape({3})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
std::array<Index, 1> output_indices;
EXPECT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({-3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(-3));
EXPECT_THAT(t.TransformIndices(span<const Index, 1>({10}), output_indices),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 10 is not contained in the domain "
"\\[1\\*, 4\\) for input dimension 0"));
}
TEST(TransformIndicesTest, IndexRangeError) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({5, 6, 7}),
IndexInterval::Closed(6, 7))
.Finalize()
.value();
std::array<Index, 1> output_indices;
EXPECT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({2}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(6));
EXPECT_THAT(t.TransformIndices(span<const Index, 1>({1}), output_indices),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Computing index for output dimension 0: "
"Checking result of index array output index map: "
"Index 5 is outside valid range \\[6, 8\\)"));
}
TEST(IndexTransformTest, ConstructMove) {
auto t = IdentityTransform(2);
auto* data = TransformAccess::rep(t);
IndexTransform<> t2(std::move(t));
EXPECT_EQ(data, TransformAccess::rep(t2));
}
TEST(IndexTransformTest, AssignMove) {
auto t = IdentityTransform(2);
auto* data = TransformAccess::rep(t);
IndexTransform<> t2;
t2 = std::move(t);
EXPECT_EQ(data, TransformAccess::rep(t2));
}
TEST(IndexDomainTest, DefaultConstruct) {
IndexDomainView<> d;
EXPECT_FALSE(d.valid());
}
TEST(IndexDomainTest, ConstructFromTransform) {
auto d = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
ASSERT_TRUE(d.valid());
EXPECT_EQ(2, d.rank());
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 4));
EXPECT_THAT(d.implicit_lower_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(d.implicit_upper_bounds(), DimensionSet::FromBools({0, 1}));
EXPECT_THAT(d.labels(), ::testing::ElementsAre("x", "y"));
EXPECT_EQ(IndexDomainDimension<view>(
{IndexInterval::UncheckedSized(1, 3), true, false}, "x"),
d[0]);
EXPECT_EQ(IndexDomainDimension<view>(
{IndexInterval::UncheckedSized(2, 4), false, true}, "y"),
d[1]);
EXPECT_EQ(12, d.num_elements());
}
TEST(IndexDomainTest, CompareEqual) {
IndexDomain<2> d1;
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
IndexDomain<2> d3(IndexTransformBuilder<2, 1>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.input_labels({"x", "y"})
.output_constant(0, 1)
.Finalize()
.value()
.domain());
auto d4 = IndexDomainBuilder<2>()
.origin({1, 3})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d5 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d6 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d7 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d8 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"z", "y"})
.Finalize()
.value();
EXPECT_EQ(d1, d1);
EXPECT_EQ(d2, d2);
EXPECT_EQ(d3, d3);
EXPECT_EQ(d4, d4);
EXPECT_EQ(d5, d5);
EXPECT_EQ(d6, d6);
EXPECT_EQ(d7, d7);
EXPECT_EQ(d8, d8);
EXPECT_NE(d1, d2);
EXPECT_EQ(d2, d3);
EXPECT_NE(d2, d4);
EXPECT_NE(d2, d5);
EXPECT_NE(d2, d6);
EXPECT_NE(d2, d7);
EXPECT_NE(d2, d8);
}
TEST(IndexDomainTest, ConvertRank) {
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
IndexDomain<> d_dynamic = d2;
EXPECT_EQ(d_dynamic, d2);
IndexDomain<> d_dynamic_from_rvalue = IndexDomain<2>(d2);
EXPECT_EQ(d_dynamic_from_rvalue, d2);
auto d2_cast = StaticRankCast<2>(d_dynamic);
static_assert(std::is_same_v<decltype(d2_cast), Result<IndexDomain<2>>>);
EXPECT_EQ(d2_cast, d2);
auto d2_cast_rvalue = StaticRankCast<2>(IndexDomain<>(d_dynamic));
static_assert(
std::is_same_v<decltype(d2_cast_rvalue), Result<IndexDomain<2>>>);
EXPECT_EQ(d2_cast_rvalue, d2);
EXPECT_THAT(StaticRankCast<3>(d_dynamic),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast index domain with rank of 2 "
"to index domain with rank of 3"));
}
TEST(IndexDomainTest, SubDomain) {
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d3 = IndexDomainBuilder<2>()
.origin({2, 1})
.shape({4, 3})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.labels({"y", "x"})
.Finalize()
.value();
EXPECT_EQ(d3, (d2[span<const DimensionIndex, 2>({1, 0})]));
}
TEST(IndexDomainTest, PrintToOstream) {
EXPECT_EQ("<invalid index domain>", StrCat(IndexDomain<2>()));
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
EXPECT_EQ(R"({ "x": [1*, 4), "y": [2, 6*) })", StrCat(d2));
}
static_assert(IsIndexDomain<bool> == false);
static_assert(IsIndexDomain<IndexDomain<3>> == true);
static_assert(IsIndexDomain<IndexDomainView<3>> == true);
TEST(CastTest, IndexTransform) {
auto t = IdentityTransform(span<const Index>({2, 3}));
auto t2 = StaticCast<IndexTransform<2, 2>, unchecked>(t);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t2);
EXPECT_THAT((StaticCast<IndexTransformView<2, 2>>(t)),
::testing::Optional(t));
EXPECT_THAT(
(StaticCast<IndexTransform<2, 3>>(t)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 2 and output rank of 3"));
EXPECT_THAT(
(tensorstore::StaticRankCast<3>(t)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 3 and output dynamic rank"));
}
TEST(CastTest, IndexTransformView) {
auto t = IdentityTransform(span<const Index>({2, 3}));
IndexTransformView<> t_ref = t;
auto t2 = StaticCast<IndexTransformView<2, 2>>(t_ref);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t2);
EXPECT_THAT((StaticCast<IndexTransform<2, 2>>(t_ref)),
::testing::Optional(t));
EXPECT_THAT(
(StaticCast<IndexTransformView<2, 3>>(t_ref)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 2 and output rank of 3"));
EXPECT_THAT(
(tensorstore::StaticRankCast<3>(t_ref)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 3 and output dynamic rank"));
}
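// Covers MergeIndexDomains end to end: invalid domains act as identities,
// merging is idempotent, rank mismatches fail, implicit/infinite bounds
// and empty labels are filled in from the other domain, and mismatched
// labels or explicit bounds produce per-dimension errors.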
TEST(MergeIndexDomainsTest, Basic) {
EXPECT_THAT(MergeIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain1,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(MergeIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(MergeIndexDomains(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(
MergeIndexDomains(domain1, domain2),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot merge index domain \\{ .* \\} with index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain3,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, 5, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain4,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, 12})
.labels({"", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain4_merged,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, 12})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(domain1, domain3),
::testing::Optional(domain3));
EXPECT_THAT(MergeIndexDomains(domain1, domain4),
::testing::Optional(domain4_merged));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain5,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"z", "", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain6,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({2, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain7,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 12, kInfIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(domain1, domain5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 0: "
"Dimension labels do not match"));
EXPECT_THAT(MergeIndexDomains(domain1, domain6),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 0: "
"Lower bounds do not match"));
EXPECT_THAT(MergeIndexDomains(domain1, domain7),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 1: "
"Upper bounds do not match"));
}
TEST(HullIndexDomains, Basic) {
EXPECT_THAT(HullIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(HullIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(HullIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(HullIndexDomains(domain1, domain1), ::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(
HullIndexDomains(domain1, domain2),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot hull index domain \\{ .* \\} with index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(HullIndexDomains(domain1, domain3), ::testing::Optional(domain4));
}
TEST(IntersectIndexDomains, Basic) {
EXPECT_THAT(IntersectIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(IntersectIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(IntersectIndexDomains(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot intersect index domain \\{ .* \\} with "
"index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({1, kMinFiniteIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain3),
::testing::Optional(domain4));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain5, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain6, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({1, 1, 1})
.origin({1, kMinFiniteIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain5),
::testing::Optional(domain6));
}
TEST(ConstrainIndexDomain, Basic) {
using ::tensorstore::ConstrainIndexDomain;
EXPECT_THAT(ConstrainIndexDomain(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(ConstrainIndexDomain(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(ConstrainIndexDomain(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(ConstrainIndexDomain(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(ConstrainIndexDomain(domain1, domain2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot constrain index domain \\{ .* \\} with "
"index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, -100})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, kMinFiniteIndex, -100})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(ConstrainIndexDomain(domain3, domain1),
::testing::Optional(domain4));
}
TEST(IndexTransformTest, WithImplicitDimensions) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_transform,
IndexTransformBuilder(3, 3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.output_identity_transform()
.Finalize());
EXPECT_EQ(expected_transform,
WithImplicitDimensions(IdentityTransform(3),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexTransformTest, WithImplicitDimensionsIndexArray) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform,
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1, 2}))
.Finalize());
EXPECT_EQ(
expected_transform,
WithImplicitDimensions(expected_transform, DimensionSet::FromBools({1}),
DimensionSet::FromBools({1})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder(1)
.shape({3})
.implicit_lower_bounds({1})
.implicit_upper_bounds({1})
.Finalize());
EXPECT_EQ(expected_domain,
WithImplicitDimensions(expected_transform.domain(),
DimensionSet::FromBools({1}),
DimensionSet::FromBools({1})));
}
TEST(IndexTransformTest, WithImplicitDimensionsStaticRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_transform,
(IndexTransformBuilder<3, 3>()
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.output_identity_transform()
.Finalize()));
EXPECT_EQ(expected_transform,
WithImplicitDimensions(IdentityTransform<3>(),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, WithImplicitDimensions) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.Finalize());
EXPECT_EQ(
expected_domain,
WithImplicitDimensions(IndexDomain(3), DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, WithImplicitDimensionsStaticRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder<3>()
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.Finalize());
EXPECT_EQ(expected_domain,
WithImplicitDimensions(IndexDomain<3>(tensorstore::StaticRank<3>{}),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, ApplyIndexTransform) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain,
IndexDomainBuilder<3>().origin({1, 2, 3}).shape({5, 5, 5}).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, (IndexTransformBuilder<4, 3>()
.output_single_input_dimension(0, 5, 1, 3)
.output_single_input_dimension(1, -7, 1, 0)
.output_single_input_dimension(2, 3, 1, 1)
.Finalize()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder<4>()
.origin({9, 0, -kInfIndex, -4})
.shape({5, 5, tensorstore::kInfSize, 5})
.implicit_lower_bounds({0, 0, 1, 0})
.implicit_upper_bounds({0, 0, 1, 0})
.Finalize());
EXPECT_THAT(domain | transform, ::testing::Optional(expected_domain));
}
TEST(IndexTransformSerializationTest, Basic) {
TestSerializationRoundTrip(tensorstore::IndexTransform<>());
TestSerializationRoundTrip(tensorstore::IdentityTransform(5));
}
TEST(IndexDomainSerializationTest, Basic) {
TestSerializationRoundTrip(tensorstore::IndexDomain<>());
TestSerializationRoundTrip(
tensorstore::IndexDomain<>(tensorstore::IdentityTransform(5).domain()));
}
TEST(ComputeInputDimensionReferenceCountsTest, Identity) {
DimensionIndex reference_counts[3];
ComputeInputDimensionReferenceCounts(IdentityTransform(3), reference_counts);
EXPECT_THAT(reference_counts, ::testing::ElementsAre(1, 1, 1));
}
TEST(ComputeInputDimensionReferenceCountsTest, IndexArray) {
DimensionIndex reference_counts[3];
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(3, 1)
.input_shape({2, 2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 2}, {3, 4}}}))
.Finalize());
ComputeInputDimensionReferenceCounts(transform, reference_counts);
EXPECT_THAT(reference_counts, ::testing::ElementsAre(0, 1, 1));
}
TEST(GetInputDimensionsForOutputDimensionTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(3, 3)
.input_shape({2, 2, 2})
.output_constant(0, 42)
.output_single_input_dimension(1, 0, 1, 1)
.output_index_array(2, 0, 1, MakeArray<Index>({{{1, 2}, {3, 4}}}))
.Finalize());
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 0),
::testing::Pair(DimensionSet(), false));
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 1),
::testing::Pair(DimensionSet::FromBools({0, 1, 0}), false));
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 2),
::testing::Pair(DimensionSet::FromBools({0, 1, 1}), true));
}
TEST(TranslateOutputDimensionsByTest, Basic) {
auto orig_transform = IdentityTransform(3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform, IndexTransformBuilder(3, 3)
.output_single_input_dimension(0, 1, 1, 0)
.output_single_input_dimension(1, 2, 1, 1)
.output_single_input_dimension(2, 3, 1, 2)
.Finalize());
EXPECT_THAT(TranslateOutputDimensionsBy(orig_transform, {{1, 2, 3}}),
::testing::Optional(expected_transform));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c167b628-6d91-4b73-9223-97c19a6001c8 | cpp | google/tensorstore | element_pointer | tensorstore/util/element_pointer.cc | tensorstore/util/element_pointer_test.cc | #include "tensorstore/util/element_pointer.h"
#include <string>
#include "tensorstore/data_type.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_element_pointer {
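// Builds the description used in StaticDataTypeCast error messages, e.g.
// "pointer with data type of float32"; the data-type portion comes from
// StaticCastTraits<DataType>::Describe.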
std::string DescribeForCast(DataType dtype) {
return tensorstore::StrCat("pointer with ",
StaticCastTraits<DataType>::Describe(dtype));
}
}  // namespace internal_element_pointer
} | #include "tensorstore/util/element_pointer.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/data_type.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::ElementPointer;
using ::tensorstore::ElementTagTraits;
using ::tensorstore::IsElementTag;
using ::tensorstore::MatchesStatus;
using ::tensorstore::PointerElementTag;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::SharedElementPointer;
using ::tensorstore::StaticDataTypeCast;
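// Compile-time checks for the element-tag machinery: which types qualify
// as element tags, how tags map to pointer types, and which conversions
// between ElementPointer / SharedElementPointer specializations are (and
// are not) permitted.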
static_assert(IsElementTag<int>);
static_assert(IsElementTag<void>);
static_assert(IsElementTag<const void>);
static_assert(IsElementTag<int*>);
static_assert(IsElementTag<const int>);
static_assert(!IsElementTag<volatile int>);
static_assert(!IsElementTag<int(int)>);
static_assert(IsElementTag<int (*)(int)>);
static_assert(IsElementTag<Shared<int>>);
static_assert(!IsElementTag<const Shared<int>>);
static_assert(!IsElementTag<Shared<Shared<int>>>);
static_assert(!IsElementTag<Shared<const Shared<int>>>);
static_assert(!IsElementTag<Shared<const Shared<Shared<int>>>>);
static_assert(std::is_same_v<ElementTagTraits<int>::Pointer, int*>);
static_assert(std::is_same_v<ElementTagTraits<int>::rebind<float>, float>);
static_assert(std::is_same_v<ElementTagTraits<Shared<int>>::Pointer,
std::shared_ptr<int>>);
static_assert(std::is_same_v<ElementTagTraits<Shared<int>>::rebind<float>,
Shared<float>>);
static_assert(std::is_same_v<PointerElementTag<int*>, int>);
static_assert(
std::is_same_v<PointerElementTag<std::shared_ptr<int>>, Shared<int>>);
static_assert(
std::is_convertible_v<ElementPointer<int>, ElementPointer<const int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<int>>);
static_assert(std::is_convertible_v<ElementPointer<int>, ElementPointer<void>>);
static_assert(
!std::is_convertible_v<ElementPointer<void>, ElementPointer<int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<int>>);
static_assert(
!std::is_convertible_v<ElementPointer<const int>, ElementPointer<void>>);
static_assert(std::is_convertible_v<ElementPointer<const int>,
ElementPointer<const void>>);
static_assert(std::is_convertible_v<int*, ElementPointer<int>>);
static_assert(!std::is_convertible_v<const int*, ElementPointer<int>>);
static_assert(std::is_convertible_v<const int*, ElementPointer<const int>>);
static_assert(std::is_convertible_v<int*, ElementPointer<void>>);
static_assert(!std::is_convertible_v<const int*, ElementPointer<void>>);
static_assert(std::is_convertible_v<int*, ElementPointer<const int>>);
static_assert(std::is_convertible_v<int*, ElementPointer<const void>>);
static_assert(std::is_constructible_v<ElementPointer<void>, void*, DataType>);
static_assert(
std::is_constructible_v<ElementPointer<const void>, void*, DataType>);
static_assert(!std::is_constructible_v<ElementPointer<void>, void*>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
SharedElementPointer<const int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<int>>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
SharedElementPointer<void>>);
static_assert(!std::is_convertible_v<SharedElementPointer<void>,
SharedElementPointer<int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<void>>);
static_assert(std::is_convertible_v<SharedElementPointer<const int>,
SharedElementPointer<const void>>);
static_assert(
std::is_convertible_v<std::shared_ptr<int>, SharedElementPointer<int>>);
static_assert(std::is_convertible_v<std::shared_ptr<const int>,
SharedElementPointer<const int>>);
static_assert(
std::is_convertible_v<std::shared_ptr<int>, SharedElementPointer<void>>);
static_assert(!std::is_convertible_v<std::shared_ptr<const int>,
SharedElementPointer<void>>);
static_assert(std::is_convertible_v<std::shared_ptr<int>,
SharedElementPointer<const int>>);
static_assert(std::is_convertible_v<std::shared_ptr<int>,
SharedElementPointer<const void>>);
static_assert(std::is_constructible_v<SharedElementPointer<void>,
std::shared_ptr<void>, DataType>);
static_assert(std::is_constructible_v<SharedElementPointer<const void>,
std::shared_ptr<void>, DataType>);
static_assert(
std::is_convertible_v<SharedElementPointer<int>, ElementPointer<int>>);
static_assert(std::is_convertible_v<SharedElementPointer<int>,
ElementPointer<const int>>);
static_assert(!std::is_convertible_v<SharedElementPointer<void>,
ElementPointer<const int>>);
static_assert(!std::is_constructible_v<ElementPointer<const int>,
SharedElementPointer<void>>);
static_assert(!std::is_constructible_v<ElementPointer<int>,
SharedElementPointer<const void>>);
TEST(ElementPointerTest, StaticType) {
{
ElementPointer<float> p_null;
EXPECT_EQ(nullptr, p_null.data());
}
{
ElementPointer<float> p_null = nullptr;
EXPECT_EQ(nullptr, p_null.data());
}
float value;
ElementPointer<float> p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(&value, p.pointer());
EXPECT_EQ(dtype_v<float>, p.dtype());
{
ElementPointer<const float> p_const = p;
EXPECT_EQ(&value, p_const.data());
EXPECT_EQ(dtype_v<float>, p_const.dtype());
p_const.pointer() = nullptr;
EXPECT_EQ(nullptr, p_const.data());
}
{
ElementPointer<float> p_copy = p;
EXPECT_EQ(&value, p_copy.data());
}
ElementPointer<const void> other = p;
EXPECT_EQ(&value, other.data());
EXPECT_EQ(other.dtype(), p.dtype());
{
auto p2 = tensorstore::StaticDataTypeCast<const float>(other);
static_assert(
std::is_same_v<decltype(p2), Result<ElementPointer<const float>>>);
TENSORSTORE_ASSERT_OK(p2);
EXPECT_EQ(&value, p2->data());
}
{
ElementPointer<const float> p_const;
p_const = p;
EXPECT_EQ(&value, p_const.data());
p_const = nullptr;
EXPECT_EQ(nullptr, p_const.data());
}
static_assert(!std::is_assignable_v<ElementPointer<float>,
ElementPointer<const float>>);
static_assert(
!std::is_assignable_v<ElementPointer<int>, ElementPointer<float>>);
static_assert(!std::is_assignable_v<ElementPointer<int>, float*>);
static_assert(!std::is_assignable_v<ElementPointer<void>, void*>);
static_assert(!std::is_assignable_v<ElementPointer<const float>,
ElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<float>, void*>);
}
TEST(ElementPointerTest, DynamicType) {
{
ElementPointer<void> p_null;
EXPECT_EQ(nullptr, p_null.data());
EXPECT_EQ(DataType(), p_null.dtype());
}
{
ElementPointer<void> p_null = nullptr;
EXPECT_EQ(nullptr, p_null.data());
EXPECT_EQ(DataType(), p_null.dtype());
}
float value;
{
ElementPointer<void> p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p = {static_cast<void*>(&value), dtype_v<float>};
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
static_assert(
!std::is_assignable_v<ElementPointer<void>, ElementPointer<const float>>);
static_assert(!std::is_assignable_v<ElementPointer<void>,
SharedElementPointer<const float>>);
static_assert(
!std::is_assignable_v<ElementPointer<void>, ElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<void>,
SharedElementPointer<const void>>);
static_assert(!std::is_assignable_v<ElementPointer<void>, void*>);
static_assert(!std::is_assignable_v<ElementPointer<void>, const float*>);
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
ElementPointer<void> p_copy = p;
EXPECT_EQ(&value, p_copy.data());
EXPECT_EQ(dtype_v<float>, p_copy.dtype());
}
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
ElementPointer<void> p_copy;
p_copy = p;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
p = ElementPointer<float>(&value);
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
p = &value;
EXPECT_EQ(&value, p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
std::shared_ptr<float> shared_value = std::make_shared<float>();
p = SharedElementPointer<float>(shared_value);
EXPECT_EQ(shared_value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
{
ElementPointer<void> p;
std::shared_ptr<float> shared_value = std::make_shared<float>();
p = SharedElementPointer<void>(shared_value);
EXPECT_EQ(shared_value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
}
TEST(ElementPointerTest, StaticDataTypeCast) {
float value;
EXPECT_THAT(StaticDataTypeCast<std::int32_t>(ElementPointer<void>(&value)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast pointer with data type of float32 to "
"pointer with data type of int32"));
EXPECT_THAT(StaticDataTypeCast<std::int32_t>(
SharedElementPointer<void>(std::make_shared<float>())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast pointer with data type of float32 to "
"pointer with data type of int32"));
}
TEST(SharedElementPointerTest, StaticType) {
std::shared_ptr<float> value = std::make_shared<float>();
SharedElementPointer<float> p = value;
EXPECT_EQ(value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
{
SharedElementPointer<float> p_copy = p;
EXPECT_EQ(value.get(), p_copy.data());
SharedElementPointer<float> p_move = std::move(p_copy);
EXPECT_EQ(value.get(), p_move.data());
SharedElementPointer<const float> p_const_move = std::move(p_move);
EXPECT_EQ(value.get(), p_const_move.data());
}
{
SharedElementPointer<const void> other = p;
EXPECT_EQ(value.get(), other.data());
EXPECT_EQ(other.dtype(), p.dtype());
EXPECT_EQ(3, value.use_count());
}
EXPECT_EQ(2, value.use_count());
{
ElementPointer<float> x = p;
EXPECT_EQ(value.get(), x.data());
}
{
ElementPointer<const void> x = p;
EXPECT_EQ(value.get(), x.data());
}
{
SharedElementPointer<void> shared_p_void = p;
auto p_float = StaticDataTypeCast<float>(shared_p_void).value();
static_assert(
std::is_same_v<decltype(p_float), SharedElementPointer<float>>);
EXPECT_EQ(value.get(), p_float.data());
}
{
float fvalue;
auto f_pointer = UnownedToShared(ElementPointer<float>(&fvalue));
static_assert(
std::is_same_v<decltype(f_pointer), SharedElementPointer<float>>);
EXPECT_EQ(&fvalue, f_pointer.data());
}
{
SharedElementPointer<float> p2;
EXPECT_TRUE(p2 == nullptr);
EXPECT_TRUE(nullptr == p2);
EXPECT_FALSE(p2 != nullptr);
EXPECT_FALSE(nullptr != p2);
EXPECT_FALSE(p2 == p);
EXPECT_TRUE(p2 != p);
p2 = p;
EXPECT_EQ(value.get(), p2.data());
EXPECT_FALSE(p2 == nullptr);
EXPECT_FALSE(nullptr == p2);
EXPECT_TRUE(p2 != nullptr);
EXPECT_TRUE(nullptr != p2);
EXPECT_TRUE(p2 == p);
EXPECT_FALSE(p2 != p);
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<float> p2_move;
p2_move = std::move(p2);
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<const float> p2_move;
p2_move = std::move(p2);
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
{
SharedElementPointer<float> p2 = p;
SharedElementPointer<float> p2_move;
p2_move = std::move(p2.pointer());
EXPECT_TRUE(p2 == nullptr);
EXPECT_EQ(value.get(), p2_move.data());
}
static_assert(!std::is_assignable_v<SharedElementPointer<float>,
SharedElementPointer<void>>);
static_assert(!std::is_assignable_v<SharedElementPointer<float>,
std::shared_ptr<void>>);
}
TEST(SharedElementPointerTest, DynamicType) {
std::shared_ptr<float> value = std::make_shared<float>();
SharedElementPointer<void> p = value;
EXPECT_EQ(value.get(), p.data());
EXPECT_EQ(dtype_v<float>, p.dtype());
}
TEST(ElementPointerTest, Deduction) {
int* raw_int_ptr;
std::shared_ptr<int> shared_int_ptr;
ElementPointer<int> el_ptr;
ElementPointer<void> el_void_ptr;
SharedElementPointer<int> shared_el_ptr;
SharedElementPointer<void> shared_void_el_ptr;
{
auto x = ElementPointer(raw_int_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<int>>);
}
{
auto x = ElementPointer(shared_int_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<int>>>);
}
{
auto x = ElementPointer(el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<int>>);
}
{
auto x = ElementPointer(el_void_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<void>>);
}
{
auto x = ElementPointer(shared_el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<int>>>);
}
{
auto x = ElementPointer(shared_void_el_ptr);
static_assert(std::is_same_v<decltype(x), ElementPointer<Shared<void>>>);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/element_pointer.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/element_pointer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ac223d64-35cb-4f4c-b7a5-4be97a0a0e39 | cpp | google/tensorstore | status_testutil | tensorstore/util/status_testutil.cc | tensorstore/util/status_testutil_test.cc | #include "tensorstore/util/status_testutil.h"
#include <ostream>
#include <regex>
#include <string>
#include <system_error>
#include <gmock/gmock.h>
#include "absl/status/status.h"
namespace tensorstore {
namespace internal_status {
namespace {
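// gmock matcher adapter that matches a status message against
// `message_pattern` using std::regex_match (a full match, not a search),
// so patterns must cover the entire message.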
template <typename StringType>
class RegexMatchImpl : public ::testing::MatcherInterface<StringType> {
public:
RegexMatchImpl(const std::string& message_pattern)
: message_pattern_(message_pattern) {}
void DescribeTo(std::ostream* os) const override {
*os << "message matches pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "message doesn't match pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
bool MatchAndExplain(
StringType message,
::testing::MatchResultListener* result_listener) const override {
return std::regex_match(message, std::regex(message_pattern_));
}
private:
const std::string message_pattern_;
};
}  // namespace
}  // namespace internal_status
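// Builds the matcher returned by tensorstore::MatchesStatus(code,
// pattern): the status code must equal `status_code` and the message must
// fully match `message_pattern`.  Typical use, as seen throughout the
// tests in this repository:
//
//   EXPECT_THAT(status, MatchesStatus(absl::StatusCode::kInvalidArgument,
//                                     "Ranks do not match"));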
internal_status::StatusIsMatcher MatchesStatus(
absl::StatusCode status_code, const std::string& message_pattern) {
return internal_status::StatusIsMatcher(
status_code, ::testing::Matcher<const std::string&>(
new internal_status::RegexMatchImpl<const std::string&>(
message_pattern)));
}
} | #include "tensorstore/util/status_testutil.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::Result;
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(StatusTestutilTest, IsOk) {
EXPECT_THAT([]() -> Future<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT([]() -> Result<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT(absl::OkStatus(), ::tensorstore::IsOk());
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOk());
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOk());
EXPECT_THAT(absl::InternalError(""), ::testing::Not(::tensorstore::IsOk()));
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::InternalError("")),
testing::IsEmpty());
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::OkStatus()),
testing::IsEmpty());
TENSORSTORE_EXPECT_OK(absl::OkStatus());
TENSORSTORE_ASSERT_OK(absl::OkStatus());
TENSORSTORE_EXPECT_OK([]() -> Future<void> { return absl::OkStatus(); }());
TENSORSTORE_ASSERT_OK([]() -> Result<void> { return absl::OkStatus(); }());
}
TEST(StatusTestutilTest, Optional) {
EXPECT_THAT(Result<int>{1}, ::testing::Optional(1));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(Result<int>{1}, ::testing::Optional(::testing::_));
EXPECT_THAT(Result<int>{2}, ::testing::Optional(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(
Explain(::testing::Optional(1), Result<int>(absl::InternalError(""))),
testing::HasSubstr("which is not engaged"));
EXPECT_THAT(Explain(::testing::Optional(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, IsOkAndHolds) {
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(1));
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOkAndHolds(2));
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(::testing::_));
EXPECT_THAT(Result<int>{2}, ::tensorstore::IsOkAndHolds(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::tensorstore::IsOkAndHolds(1)));
int result;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(result, []() -> Result<int> { return 2; }());
EXPECT_EQ(2, result);
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1),
Result<int>(absl::InternalError(""))),
testing::HasSubstr("whose status code is INTERNAL"));
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, StatusIs) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(
absl::OkStatus(),
::testing::Not(::tensorstore::StatusIs(absl::StatusCode::kInternal)));
EXPECT_THAT(absl::OkStatus(), ::tensorstore::StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(Explain(::tensorstore::StatusIs(absl::StatusCode::kOk),
absl::InternalError("")),
testing::HasSubstr("whose status code INTERNAL doesn't match"));
}
TEST(StatusTestutilTest, StatusIs_WithMessage) {
EXPECT_THAT(
Result<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
Future<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
absl::InternalError("strongbad"),
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(absl::InternalError("strongbad"),
::tensorstore::StatusIs(
::testing::_, ::testing::Not(::testing::HasSubstr("good"))));
EXPECT_THAT(
absl::Status{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::Not(absl::StatusCode::kAborted),
::testing::Not(::testing::HasSubstr("good"))));
}
TEST(StatusTestutilTest, MatchesStatus) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::OkStatus(),
::tensorstore::MatchesStatus(absl::StatusCode::kOk));
}
TEST(StatusTestutilTest, MatchesStatus_Pattern) {
EXPECT_THAT(Result<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(Future<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kInternal, "b")));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kCancelled, "a")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/status_testutil.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/status_testutil_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a1838971-fec5-40fa-ae1d-8854e5cd60e1 | cpp | google/tensorstore | constant_vector | tensorstore/util/constant_vector.cc | tensorstore/util/constant_vector_test.cc | #include "tensorstore/util/constant_vector.h"
#include <string>
#include "tensorstore/rank.h"
namespace tensorstore {
namespace internal_constant_vector {
const std::string kStringArray[kMaxRank] = {};
}
} | #include "tensorstore/util/constant_vector.h"
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::GetConstantVector;
using ::tensorstore::span;
TEST(GetConstantVectorTest, RunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(5);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, ZeroRunTimeLengthInt) {
auto x = GetConstantVector<int, 3>(0);
static_assert(std::is_same_v<decltype(x), span<const int>>);
EXPECT_EQ(0, x.size());
}
TEST(GetConstantVectorTest, StaticLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 5>();
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticLengthIntUsingStaticRankValue) {
constexpr auto x = GetConstantVector<int, 3>(tensorstore::StaticRank<5>{});
static_assert(std::is_same_v<decltype(x), const span<const int, 5>>);
EXPECT_THAT(x, ::testing::ElementsAreArray(std::vector<int>(5, 3)));
}
TEST(GetConstantVectorTest, StaticZeroLengthInt) {
constexpr auto x = GetConstantVector<int, 3, 0>();
static_assert(std::is_same_v<decltype(x), const span<const int, 0>>);
}
TEST(GetDefaultStringVectorTest, StaticLength) {
auto x = tensorstore::GetDefaultStringVector<2>();
static_assert(std::is_same_v<decltype(x), span<const std::string, 2>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
TEST(GetDefaultStringVectorTest, DynamicLength) {
auto x = tensorstore::GetDefaultStringVector(2);
static_assert(std::is_same_v<decltype(x), span<const std::string>>);
EXPECT_THAT(x, ::testing::ElementsAre("", ""));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_vector.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_vector_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d840c5d5-dfe8-4ccf-a118-82e261fecd8b | cpp | google/tensorstore | utf8_string | tensorstore/util/utf8_string.cc | tensorstore/util/utf8_string_test.cc | #include "tensorstore/util/utf8_string.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/internal/utf8.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
bool Serializer<Utf8String>::Encode(EncodeSink& sink, const Utf8String& value) {
return serialization::WriteDelimited(sink.writer(), value.utf8);
}
bool Serializer<Utf8String>::Decode(DecodeSource& source, Utf8String& value) {
return serialization::ReadDelimitedUtf8(source.reader(), value.utf8);
}
}
} | #include "tensorstore/util/utf8_string.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Utf8String;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Valid) {
TestSerializationRoundTrip(Utf8String{""});
TestSerializationRoundTrip(Utf8String{"abc"});
TestSerializationRoundTrip(Utf8String{"\xc2\x80hello\xc2\xbf"});
}
TEST(SerializationTest, Invalid) {
EXPECT_THAT(SerializationRoundTrip(Utf8String{"\xC1"}),
MatchesStatus(absl::StatusCode::kDataLoss,
"String is not valid utf-8: .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/utf8_string.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/utf8_string_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9dc1a606-784a-4e80-ab6c-ccaaf701715c | cpp | google/tensorstore | quote_string | tensorstore/util/quote_string.cc | tensorstore/util/quote_string_test.cc | #include "tensorstore/util/quote_string.h"
#include <string>
#include <string_view>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tensorstore {
std::string QuoteString(std::string_view s) {
return absl::StrCat(
"\"", absl::CHexEscape(absl::string_view(s.data(), s.size())), "\"");
}
} | #include "tensorstore/util/quote_string.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::QuoteString;
using ::testing::StrEq;
TEST(QuoteStringTest, Basic) {
EXPECT_THAT(QuoteString("abc "), StrEq("\"abc \""));
EXPECT_THAT(QuoteString("a\"b\n\x01"), StrEq("\"a\\\"b\\n\\x01\""));
EXPECT_THAT(QuoteString("'"), StrEq("\"\\'\""));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/quote_string.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/quote_string_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7b7f11c4-4b4f-4dbe-bb8a-0cbf5334cc5c | cpp | google/tensorstore | iterate | tensorstore/index_space/internal/iterate.cc | tensorstore/index_space/iterate_test.cc | #include "tensorstore/util/internal/iterate.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <cassert>
#include <cstring>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/internal/iterate_impl.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
constexpr Index temp_index_buffer_size = 1024;
void MarkSingletonDimsAsSkippable(
span<const Index> input_shape,
input_dimension_iteration_flags::Bitmask* input_dimension_flags) {
for (DimensionIndex i = 0; i < input_shape.size(); ++i) {
if (input_shape[i] == 1) {
input_dimension_flags[i] = input_dimension_iteration_flags::can_skip;
}
}
}
namespace {
template <bool UseStridedLayout>
absl::Status InitializeSingleArrayIterationStateImpl(
OffsetArrayView<const void, (UseStridedLayout ? dynamic_rank : 0)> array,
TransformRep* transform, const Index* iteration_origin,
const Index* iteration_shape, SingleArrayIterationState* single_array_state,
input_dimension_iteration_flags::Bitmask* input_dimension_flags) {
if constexpr (!UseStridedLayout) {
assert(transform != nullptr);
}
const DimensionIndex output_rank =
UseStridedLayout ? array.rank() : transform->output_rank;
single_array_state->base_pointer = const_cast<void*>(array.data());
if constexpr (UseStridedLayout) {
if (!transform) {
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
const DimensionIndex input_dim = output_dim;
const Index byte_stride = array.byte_strides()[output_dim];
single_array_state->input_byte_strides[input_dim] = byte_stride;
if (iteration_shape[input_dim] != 1) {
input_dimension_flags[input_dim] |=
input_dimension_iteration_flags::strided;
}
single_array_state->base_pointer +=
internal::wrap_on_overflow::Multiply(iteration_origin[input_dim],
byte_stride);
}
return absl::OkStatus();
}
}
assert(output_rank == transform->output_rank);
const DimensionIndex input_rank = transform->input_rank;
std::fill_n(&single_array_state->input_byte_strides[0], input_rank,
static_cast<Index>(0));
span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const Index byte_stride =
UseStridedLayout ? array.byte_strides()[output_dim] : 1;
if (byte_stride == 0) continue;
const auto& map = maps[output_dim];
const Index output_offset = map.offset();
const Index output_stride = map.stride();
single_array_state->base_pointer +=
internal::wrap_on_overflow::Multiply(output_offset, byte_stride);
if (output_stride == 0 || map.method() == OutputIndexMethod::constant) {
if constexpr (UseStridedLayout) {
if (!Contains(array.domain()[output_dim], output_offset)) {
return MaybeAnnotateStatus(
CheckContains(array.domain()[output_dim], output_offset),
tensorstore::StrCat(
"Checking bounds of constant output index map for dimension ",
output_dim));
}
      }
} else if (map.method() == OutputIndexMethod::single_input_dimension) {
const DimensionIndex input_dim = map.input_dimension();
assert(input_dim >= 0 && input_dim < input_rank);
if constexpr (UseStridedLayout) {
TENSORSTORE_ASSIGN_OR_RETURN(
IndexInterval range,
GetAffineTransformRange(
IndexInterval::UncheckedSized(iteration_origin[input_dim],
iteration_shape[input_dim]),
output_offset, output_stride),
MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Checking bounds of output index map for dimension ",
output_dim)));
if (!Contains(array.domain()[output_dim], range)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Output dimension ", output_dim, " range of ", range,
" is not contained within array domain of ",
array.domain()[output_dim]));
}
}
single_array_state->base_pointer += internal::wrap_on_overflow::Multiply(
byte_stride, internal::wrap_on_overflow::Multiply(
output_stride, iteration_origin[input_dim]));
single_array_state->input_byte_strides[input_dim] =
internal::wrap_on_overflow::Add(
single_array_state->input_byte_strides[input_dim],
internal::wrap_on_overflow::Multiply(byte_stride, output_stride));
input_dimension_flags[input_dim] |=
input_dimension_iteration_flags::strided;
} else {
const auto& index_array_data = map.index_array_data();
assert(index_array_data.rank_capacity >= input_rank);
IndexInterval index_bounds = index_array_data.index_range;
if constexpr (UseStridedLayout) {
TENSORSTORE_ASSIGN_OR_RETURN(
IndexInterval propagated_index_bounds,
GetAffineTransformDomain(array.domain()[output_dim], output_offset,
output_stride),
MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Propagating bounds from intermediate dimension ",
output_dim, ".")));
index_bounds = Intersect(propagated_index_bounds, index_bounds);
}
ByteStridedPointer<const Index> index_array_pointer =
index_array_data.element_pointer.data();
bool has_one_element = true;
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
const Index index_array_byte_stride =
index_array_data.byte_strides[input_dim];
index_array_pointer += internal::wrap_on_overflow::Multiply(
iteration_origin[input_dim], index_array_byte_stride);
if (index_array_byte_stride != 0 && iteration_shape[input_dim] != 1) {
input_dimension_flags[input_dim] |=
input_dimension_iteration_flags::array_indexed;
has_one_element = false;
}
}
if (has_one_element) {
const Index index = *index_array_pointer;
TENSORSTORE_RETURN_IF_ERROR(
CheckContains(index_bounds, index),
MaybeAnnotateStatus(
_,
tensorstore::StrCat("In index array map for output dimension ",
output_dim)));
single_array_state->base_pointer +=
internal::wrap_on_overflow::Multiply(
byte_stride,
internal::wrap_on_overflow::Multiply(output_stride, index));
} else {
DimensionIndex index_array_num =
single_array_state->num_array_indexed_output_dimensions++;
single_array_state->index_array_byte_strides[index_array_num] =
index_array_data.byte_strides;
single_array_state->index_array_pointers[index_array_num] =
index_array_pointer;
single_array_state->index_array_output_byte_strides[index_array_num] =
internal::wrap_on_overflow::Multiply(byte_stride, output_stride);
TENSORSTORE_RETURN_IF_ERROR(
ValidateIndexArrayBounds(
index_bounds,
ArrayView<const Index>(index_array_pointer.get(),
StridedLayoutView<dynamic_rank>(
input_rank, iteration_shape,
index_array_data.byte_strides))),
MaybeAnnotateStatus(
_,
tensorstore::StrCat("In index array map for output dimension ",
output_dim)));
}
}
}
return {};
}
}
absl::Status InitializeSingleArrayIterationState(
OffsetArrayView<const void> array, TransformRep* transform,
const Index* iteration_origin, const Index* iteration_shape,
SingleArrayIterationState* single_array_state,
input_dimension_iteration_flags::Bitmask* input_dimension_flags) {
return InitializeSingleArrayIterationStateImpl<true>(
array, transform, iteration_origin, iteration_shape, single_array_state,
input_dimension_flags);
}
absl::Status InitializeSingleArrayIterationState(
ElementPointer<const void> element_pointer, TransformRep* transform,
const Index* iteration_origin, const Index* iteration_shape,
SingleArrayIterationState* single_array_state,
input_dimension_iteration_flags::Bitmask* input_dimension_flags) {
return InitializeSingleArrayIterationStateImpl<false>(
element_pointer, transform, iteration_origin, iteration_shape,
single_array_state, input_dimension_flags);
}
Index IndirectInnerProduct(span<const Index> indices,
const DimensionIndex* dimension_order,
const Index* byte_strides) {
Index result = 0;
for (DimensionIndex i = 0; i < indices.size(); ++i) {
result = internal::wrap_on_overflow::Add(
internal::wrap_on_overflow::Multiply(indices[i],
byte_strides[dimension_order[i]]),
result);
}
return result;
}
void FillOffsetsArray(span<Index> offsets, span<const Index> position,
const DimensionIndex* input_dimension_order,
const SingleArrayIterationState& single_array_state,
Index final_input_dim_byte_stride,
Index final_input_dim_start_position) {
std::memset(offsets.data(), 0, sizeof(Index) * offsets.size());
for (DimensionIndex
j = 0,
num_array_indexed_output_dimensions =
single_array_state.num_array_indexed_output_dimensions;
j < num_array_indexed_output_dimensions; ++j) {
ByteStridedPointer<const Index> index_data_pointer =
single_array_state.index_array_pointers[j];
const Index* cur_byte_strides =
single_array_state.index_array_byte_strides[j];
index_data_pointer += internal_index_space::IndirectInnerProduct(
position, input_dimension_order, cur_byte_strides);
const auto final_byte_stride =
cur_byte_strides[input_dimension_order[position.size()]];
const Index output_dim_byte_stride =
single_array_state.index_array_output_byte_strides[j];
if (final_byte_stride == 0) {
const Index index_value = *index_data_pointer;
      for (Index i = 0; i < offsets.size(); ++i) {
        offsets[i] = internal::wrap_on_overflow::Add(
            offsets[i], internal::wrap_on_overflow::Multiply(
                            index_value, output_dim_byte_stride));
      }
} else {
index_data_pointer += internal::wrap_on_overflow::Multiply(
final_byte_stride, final_input_dim_start_position);
      for (Index i = 0; i < offsets.size(); ++i) {
        offsets[i] = internal::wrap_on_overflow::Add(
            offsets[i], internal::wrap_on_overflow::Multiply(
                            *index_data_pointer, output_dim_byte_stride));
        index_data_pointer += final_byte_stride;
      }
}
}
if (final_input_dim_byte_stride != 0) {
for (Index j = 0; j < offsets.size(); ++j) {
offsets[j] = internal::wrap_on_overflow::Add(
offsets[j],
internal::wrap_on_overflow::Multiply(
final_input_dim_byte_stride, j + final_input_dim_start_position));
}
}
}
template <size_t Arity>
bool IterateUsingSimplifiedLayout(
const SimplifiedDimensionIterationOrder& layout,
span<const Index> input_shape,
internal::ElementwiseClosure<Arity, void*> closure, void* arg,
span<const SingleArrayIterationState, Arity> single_array_states,
std::array<ptrdiff_t, Arity> element_sizes) {
const Index final_indexed_dim_size =
layout.simplified_shape[layout.pure_strided_start_dim - 1];
std::array<const Index*, Arity> strides;
for (size_t i = 0; i < Arity; ++i) {
strides[i] = &single_array_states[i].input_byte_strides[0];
}
internal::StridedLayoutFunctionApplyer<Arity> strided_applyer(
input_shape.data(),
span(&layout.input_dimension_order[layout.pure_strided_start_dim],
&layout.input_dimension_order[layout.pure_strided_end_dim]),
strides, closure, element_sizes);
struct SingleArrayOffsetsBuffer {
Index offsets[temp_index_buffer_size];
};
const DimensionIndex last_indexed_dim = layout.pure_strided_start_dim - 1;
return IterateOverIndexRange(
span<const Index>(&layout.simplified_shape[0], last_indexed_dim),
[&](span<const Index> position) {
std::array<SingleArrayOffsetsBuffer, Arity> single_array_offset_buffers;
std::array<ByteStridedPointer<void>, Arity> pointers;
std::array<Index, Arity> final_indexed_dim_byte_strides;
for (size_t i = 0; i < Arity; ++i) {
const auto& single_array_state = single_array_states[i];
pointers[i] = single_array_state.base_pointer +
internal_index_space::IndirectInnerProduct(
position, &layout.input_dimension_order[0],
&single_array_state.input_byte_strides[0]);
final_indexed_dim_byte_strides[i] =
single_array_state
.input_byte_strides[layout.input_dimension_order
[layout.pure_strided_start_dim - 1]];
}
for (Index final_indexed_dim_start_position = 0;
final_indexed_dim_start_position < final_indexed_dim_size;
final_indexed_dim_start_position += temp_index_buffer_size) {
const Index block_size = std::min(
final_indexed_dim_size - final_indexed_dim_start_position,
temp_index_buffer_size);
for (size_t i = 0; i < Arity; ++i) {
Index* offsets = single_array_offset_buffers[i].offsets;
FillOffsetsArray(span(offsets, block_size), position,
&layout.input_dimension_order[0],
single_array_states[i],
final_indexed_dim_byte_strides[i],
final_indexed_dim_start_position);
}
if (strided_applyer.inner_size() == 1) {
std::array<internal::IterationBufferPointer, Arity>
pointers_with_offset_arrays;
for (size_t i = 0; i < Arity; ++i) {
pointers_with_offset_arrays[i] = internal::IterationBufferPointer{
pointers[i], 0,
single_array_offset_buffers[i].offsets};
}
if (!internal::InvokeElementwiseClosure(
closure, internal::IterationBufferKind::kIndexed,
{1, block_size}, pointers_with_offset_arrays, arg)) {
return false;
}
} else {
for (Index j = 0; j < block_size; ++j) {
auto cur_pointers = pointers;
for (size_t i = 0; i < Arity; ++i) {
cur_pointers[i] += single_array_offset_buffers[i].offsets[j];
}
if (!strided_applyer(cur_pointers, arg)) return false;
}
}
}
return true;
});
}
#define TENSORSTORE_INTERNAL_DO_INSTANTIATE_ITERATE_USING_SIMPLIFIED_LAYOUT( \
Arity) \
template bool IterateUsingSimplifiedLayout<Arity>( \
const SimplifiedDimensionIterationOrder& layout, \
span<const Index> input_shape, \
internal::ElementwiseClosure<Arity, void*> closure, void* arg, \
span<const SingleArrayIterationState, Arity> single_array_states, \
std::array<ptrdiff_t, Arity> element_sizes);
TENSORSTORE_INTERNAL_FOR_EACH_ARITY(
TENSORSTORE_INTERNAL_DO_INSTANTIATE_ITERATE_USING_SIMPLIFIED_LAYOUT)
#undef TENSORSTORE_INTERNAL_DO_INSTANTIATE_ITERATE_USING_SIMPLIFIED_LAYOUT
}
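// Checks that every element of `index_array` lies within `bounds`, iterating
// with skip_repeated_elements and reporting the first out-of-range index via
// CheckContains.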
absl::Status ValidateIndexArrayBounds(
IndexInterval bounds,
ArrayView<const Index, dynamic_rank, offset_origin> index_array) {
const auto finite_bounds = FiniteSubset(bounds);
const Index inclusive_min = finite_bounds.inclusive_min();
const Index exclusive_max = finite_bounds.exclusive_max();
Index bad_index;
if (!IterateOverArrays(
[&](const Index* value) {
if (ABSL_PREDICT_FALSE(*value < inclusive_min ||
*value >= exclusive_max)) {
bad_index = *value;
return false;
}
return true;
},
skip_repeated_elements, index_array)) {
return CheckContains(bounds, bad_index);
}
return absl::OkStatus();
}
} | #include "tensorstore/util/iterate.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IterationConstraints;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TransformAccess;
TEST(ValidateIndexArrayBoundsTest, Basic) {
EXPECT_EQ(absl::OkStatus(),
ValidateIndexArrayBounds(IndexInterval::UncheckedClosed(5, 8),
MakeArray<Index>({5, 6, 7, 8})));
EXPECT_THAT(ValidateIndexArrayBounds(IndexInterval::UncheckedClosed(5, 8),
MakeArray<Index>({5, 6, 7, 8, 9})),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 9 is outside valid range \\[5, 9\\)"));
EXPECT_THAT(
ValidateIndexArrayBounds(IndexInterval(), MakeArray<Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(InitializeSingleArrayIterationStateTest, Basic) {
namespace flags =
tensorstore::internal_index_space::input_dimension_iteration_flags;
std::vector<flags::Bitmask> input_dimension_flags(2, 0);
auto array = tensorstore::MakeOffsetArray<int>({5, 6},
{{1, 2, 3, 4}, {5, 6, 7, 8}});
auto index_array = MakeArray<Index>({{0, 1, 1, 0}});
auto transform = tensorstore::IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, index_array)
.Finalize()
.value();
tensorstore::internal_index_space::SingleArrayIterationState
single_array_state;
EXPECT_EQ(
absl::OkStatus(),
tensorstore::internal_index_space::InitializeSingleArrayIterationState(
array, TransformAccess::rep(transform),
transform.input_origin().data(), transform.input_shape().data(),
&single_array_state, input_dimension_flags.data()));
EXPECT_EQ(1, single_array_state.num_array_indexed_output_dimensions);
EXPECT_THAT(input_dimension_flags,
::testing::ElementsAre(flags::strided, flags::array_indexed));
EXPECT_EQ(index_array.data(), single_array_state.index_array_pointers[0]);
EXPECT_EQ(transform.output_index_map(1)
.index_array()
.layout()
.byte_strides()
.data(),
single_array_state.index_array_byte_strides[0]);
EXPECT_EQ(&array(6, 7), single_array_state.base_pointer);
EXPECT_THAT(single_array_state.index_array_output_byte_strides_span(),
::testing::ElementsAre(2 * sizeof(int)));
EXPECT_THAT(
span(single_array_state.input_byte_strides).first(transform.input_rank()),
::testing::ElementsAre(-4 * static_cast<Index>(sizeof(int)), 0));
}
TEST(ComputeDimensionIterationOrderTest, Basic) {
namespace flags =
tensorstore::internal_index_space::input_dimension_iteration_flags;
using ::tensorstore::internal_index_space::ComputeDimensionIterationOrder;
const flags::Bitmask input_dimension_flags[] = {
flags::can_skip, flags::strided, flags::array_indexed,
flags::array_indexed, flags::can_skip, flags::strided};
{
const int order[] = {0, 3, 5, 4, 2, 1};
auto layout =
tensorstore::internal_index_space::ComputeDimensionIterationOrder(
input_dimension_flags, {},
[&](DimensionIndex a, DimensionIndex b) {
return order[a] < order[b];
});
EXPECT_EQ(2, layout.pure_strided_start_dim);
EXPECT_EQ(4, layout.pure_strided_end_dim);
EXPECT_THAT(span(layout.input_dimension_order).first(4),
::testing::ElementsAre(3, 2, 5, 1));
}
{
auto layout =
tensorstore::internal_index_space::ComputeDimensionIterationOrder(
input_dimension_flags,
tensorstore::ContiguousLayoutOrder::c,
[](DimensionIndex a, DimensionIndex b) { return false; });
EXPECT_EQ(3, layout.pure_strided_start_dim);
EXPECT_EQ(4, layout.pure_strided_end_dim);
EXPECT_THAT(span(layout.input_dimension_order).first(4),
::testing::ElementsAre(1, 2, 3, 5));
}
{
auto layout = ComputeDimensionIterationOrder(
input_dimension_flags,
tensorstore::ContiguousLayoutOrder::fortran,
[](DimensionIndex a, DimensionIndex b) { return false; });
EXPECT_EQ(3, layout.pure_strided_start_dim);
EXPECT_EQ(4, layout.pure_strided_end_dim);
EXPECT_THAT(span(layout.input_dimension_order).first(4),
::testing::ElementsAre(5, 3, 2, 1));
}
}
TEST(SimplifyDimensionIterationOrderTest, Rank5) {
tensorstore::internal_index_space::DimensionIterationOrder original_layout;
original_layout.input_dimension_order[0] = 7;
original_layout.input_dimension_order[1] = 9;
original_layout.input_dimension_order[2] = 3;
original_layout.input_dimension_order[3] = 1;
original_layout.input_dimension_order[4] = 5;
original_layout.pure_strided_start_dim = 3;
original_layout.pure_strided_end_dim = 5;
const Index input_shape[] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
{
auto result = SimplifyDimensionIterationOrder(
original_layout, input_shape,
[&](DimensionIndex a, DimensionIndex b, DimensionIndex size) {
EXPECT_EQ(input_shape[b], size);
switch (b) {
case 9:
EXPECT_EQ(7, a);
return true;
case 3:
EXPECT_EQ(9, a);
return false;
default:
ADD_FAILURE();
return false;
}
});
EXPECT_THAT(span(result.input_dimension_order).first(4),
::testing::ElementsAre(9, 3, 1, 5));
EXPECT_THAT(span(result.simplified_shape).first(4),
::testing::ElementsAre(9 * 11, 5, 3, 7));
    EXPECT_EQ(2, result.pure_strided_start_dim);
    EXPECT_EQ(4, result.pure_strided_end_dim);
}
}
TEST(SimplifyDimensionIterationOrderTest, Rank1) {
tensorstore::internal_index_space::DimensionIterationOrder original_layout;
original_layout.input_dimension_order[0] = 0;
original_layout.pure_strided_start_dim = 1;
original_layout.pure_strided_end_dim = 1;
const Index input_shape[] = {5};
{
auto result = SimplifyDimensionIterationOrder(
original_layout, input_shape,
[&](DimensionIndex a, DimensionIndex b, DimensionIndex size) {
return false;
});
EXPECT_THAT(span(result.input_dimension_order).first(1),
::testing::ElementsAre(0));
EXPECT_THAT(span(result.simplified_shape).first(1),
::testing::ElementsAre(5));
    EXPECT_EQ(1, result.pure_strided_start_dim);
    EXPECT_EQ(1, result.pure_strided_end_dim);
}
}
TEST(IterateOverTransformedArraysTest, StridedOnly) {
auto source_array = MakeArray<const float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto dest_array = tensorstore::AllocateArray<float>(
{2, 4, 2}, tensorstore::c_order, tensorstore::value_init);
TENSORSTORE_ASSERT_OK(IterateOverTransformedArrays(
[&](const float* source_ptr, float* dest_ptr) {
*dest_ptr = *source_ptr;
},
{},
Dims(1).TranslateClosedInterval(1, 2)(source_array),
Dims(1).IndexSlice(0)(dest_array)));
EXPECT_EQ(MakeArray<float>({{{2, 3}, {0, 0}, {0, 0}, {0, 0}},
{{6, 7}, {0, 0}, {0, 0}, {0, 0}}}),
dest_array);
}
TEST(IterateOverTransformedArraysTest, ErrorHandling) {
EXPECT_THAT(IterateOverTransformedArrays(
[&](const float* source_ptr, float* dest_ptr) {},
{},
tensorstore::ArrayView<const float>(
tensorstore::MakeScalarArray<float>(1)),
MakeArray<float>({1}))
.status(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Transformed array input ranks \\{0, 1\\} do not all match"));
}
TEST(IterateOverTransformedArrayTest, EarlyStoppingWithoutStatus) {
auto array_a = MakeArray<float>({5, 6, 7, 9});
auto array_b = MakeArray<float>({5, 6, 8, 9});
ASSERT_THAT(IterateOverTransformedArrays(
[&](const float* a_ptr, float* b_ptr) {
if (*a_ptr != *b_ptr) {
return false;
}
*b_ptr = 0;
return true;
},
{}, array_a, array_b),
::testing::Optional(false));
EXPECT_EQ(MakeArray<float>({5, 6, 7, 9}), array_a);
EXPECT_EQ(MakeArray<float>({0, 0, 8, 9}), array_b);
}
TEST(IterateOverTransformedArrayTest, EarlyStoppingWithStatus) {
auto array_a = MakeArray<float>({5, 6, 7, 9});
auto array_b = MakeArray<float>({5, 6, 8, 9});
absl::Status status;
ASSERT_THAT(IterateOverTransformedArrays(
[&](const float* a_ptr, float* b_ptr) {
if (*a_ptr != *b_ptr) {
status = absl::UnknownError(
tensorstore::StrCat(*a_ptr, " ", *b_ptr));
return false;
}
*b_ptr = 0;
return true;
},
{}, array_a, array_b),
::testing::Optional(false));
EXPECT_THAT(status, MatchesStatus(absl::StatusCode::kUnknown, "7 8"));
EXPECT_EQ(MakeArray<float>({5, 6, 7, 9}), array_a);
EXPECT_EQ(MakeArray<float>({0, 0, 8, 9}), array_b);
}
TEST(IterateOverTransformedArraysTest, IndexArrays) {
auto source_array = MakeArray<const float>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto dest_array = tensorstore::AllocateArray<float>(
{2, 4, 2}, tensorstore::c_order, tensorstore::value_init);
TENSORSTORE_ASSERT_OK(IterateOverTransformedArrays(
[&](const float* source_ptr, float* dest_ptr) {
*dest_ptr = *source_ptr;
},
{},
Dims(0).MoveToBack()(source_array),
Dims(0, 1)
.IndexArraySlice(MakeArray<Index>({1, 0, 1, 1}),
MakeArray<Index>({0, 1, 2, 3}))
.MoveToFront()(dest_array)));
EXPECT_EQ(MakeArray<float>({{{0, 0}, {2, 6}, {0, 0}, {0, 0}},
{{1, 5}, {0, 0}, {3, 7}, {4, 8}}}),
dest_array);
}
TEST(IterateOverTransformedArraysTest, SingleElementIndexArray) {
EXPECT_EQ(tensorstore::TransformArray(
MakeArray<float>({1, 2, 3}),
tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({2}))
.Finalize()
.value())
.value(),
MakeArray<float>({3}));
}
TEST(IterateOverTransformedArraysTest, CombineDimensions) {
EXPECT_EQ(
tensorstore::TransformArray(
MakeArray<float>({1, 2, 3, 4}),
tensorstore::IndexTransformBuilder<2, 1>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1}, {1, 3}}))
.Finalize()
.value())
.value(),
MakeArray<float>({{1, 2}, {2, 4}}));
}
TEST(IterateOverTransformedArraysTest, NotCombinableNonIndexedDimensions) {
EXPECT_EQ(tensorstore::TransformArray(
MakeArray<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}),
tensorstore::IndexTransformBuilder<3, 3>()
.input_origin({0, 0, 0})
.input_shape({2, 2, 2})
.output_single_input_dimension(0, 0)
.output_index_array(1, 0, 1, MakeArray<Index>({{{0}, {1}}}))
.output_single_input_dimension(2, 2)
.Finalize()
.value())
.value(),
MakeArray<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/iterate.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/iterate_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c3b5f314-0c26-448c-8d86-c1abcf0736a6 | cpp | google/tensorstore | driver | tensorstore/kvstore/ocdbt/driver.cc | tensorstore/kvstore/ocdbt/distributed/driver_test.cc | #include "tensorstore/kvstore/ocdbt/driver.h"
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/time/time.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/ref_counted_string.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/distributed/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security_registry.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/io/io_handle_impl.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/list.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/read.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace internal_ocdbt {
namespace {
namespace jb = ::tensorstore::internal_json_binding;
struct OcdbtMetrics : public internal_kvstore::CommonReadMetrics,
public internal_kvstore::CommonWriteMetrics {};
auto ocdbt_metrics = []() -> OcdbtMetrics {
return {TENSORSTORE_KVSTORE_COMMON_READ_METRICS(ocdbt),
TENSORSTORE_KVSTORE_COMMON_WRITE_METRICS(ocdbt)};
}();
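// Defaults applied when not configured explicitly: coordinator leases last
// 10 seconds, and data files target 2u << 30 bytes (2 GiB).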
constexpr absl::Duration kDefaultLeaseDuration = absl::Seconds(10);
constexpr size_t kDefaultTargetBufferSize = 2u << 30;
struct OcdbtCoordinatorResourceTraits
: public internal::ContextResourceTraits<OcdbtCoordinatorResource> {
using Spec = OcdbtCoordinatorResource::Spec;
using Resource = OcdbtCoordinatorResource::Resource;
static Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(
jb::Member("address", jb::Projection<&Spec::address>()),
jb::Member("lease_duration", jb::Projection<&Spec::lease_duration>()),
jb::Member("security", jb::Projection<&Spec::security>(
RpcSecurityMethodJsonBinder)));
}
static Result<Resource> Create(
const Spec& spec, internal::ContextResourceCreationContext context) {
return spec;
}
static Spec GetSpec(const Resource& resource,
const internal::ContextSpecBuilder& builder) {
return resource;
}
};
const internal::ContextResourceRegistration<OcdbtCoordinatorResourceTraits>
registration;
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
OcdbtDriverSpecData,
jb::Object(
jb::Member("base", jb::Projection<&OcdbtDriverSpecData::base>()),
jb::Member("manifest",
jb::Projection<&OcdbtDriverSpecData::manifest>()),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->base.path);
if (obj->manifest) {
internal::EnsureDirectoryPath(obj->manifest->path);
}
return absl::OkStatus();
}),
jb::Member("config", jb::Projection<&OcdbtDriverSpecData::config>(
jb::DefaultInitializedValue())),
jb::Projection<&OcdbtDriverSpecData::data_file_prefixes>(jb::Sequence(
jb::Member("value_data_prefix",
jb::Projection<&DataFilePrefixes::value>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))),
jb::Member("btree_node_data_prefix",
jb::Projection<&DataFilePrefixes::btree_node>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))),
jb::Member("version_tree_node_data_prefix",
jb::Projection<&DataFilePrefixes::version_tree_node>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))))),
jb::Member("assume_config",
jb::Projection<&OcdbtDriverSpecData::assume_config>(
jb::DefaultInitializedValue())),
jb::Member(
"experimental_read_coalescing_threshold_bytes",
jb::Projection<&OcdbtDriverSpecData::
experimental_read_coalescing_threshold_bytes>()),
jb::Member(
"experimental_read_coalescing_merged_bytes",
jb::Projection<&OcdbtDriverSpecData::
experimental_read_coalescing_merged_bytes>()),
jb::Member(
"experimental_read_coalescing_interval",
jb::Projection<
&OcdbtDriverSpecData::experimental_read_coalescing_interval>()),
jb::Member(
"target_data_file_size",
jb::Projection<&OcdbtDriverSpecData::target_data_file_size>()),
jb::Member("coordinator",
jb::Projection<&OcdbtDriverSpecData::coordinator>()),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&OcdbtDriverSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<&OcdbtDriverSpecData::data_copy_concurrency>())));
Result<kvstore::Spec> OcdbtDriverSpec::GetBase(std::string_view path) const {
return data_.base;
}
Future<kvstore::DriverPtr> OcdbtDriverSpec::DoOpen() const {
auto base_kvstore_future = kvstore::Open(data_.base);
Future<kvstore::KvStore> manifest_kvstore_future =
data_.manifest ? kvstore::Open(*data_.manifest)
: Future<kvstore::KvStore>(kvstore::KvStore{});
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const OcdbtDriverSpec>(this)](
kvstore::KvStore& base_kvstore,
kvstore::KvStore& manifest_kvstore) -> Result<kvstore::DriverPtr> {
auto driver = internal::MakeIntrusivePtr<OcdbtDriver>();
driver->base_ = std::move(base_kvstore);
driver->manifest_kvstore_ = std::move(manifest_kvstore);
auto supported_manifest_features =
driver->base_.driver->GetSupportedFeatures(KeyRange::Prefix(
tensorstore::StrCat(driver->base_.path, "manifest.")));
driver->cache_pool_ = spec->data_.cache_pool;
driver->data_copy_concurrency_ = spec->data_.data_copy_concurrency;
driver->data_file_prefixes_ = spec->data_.data_file_prefixes;
driver->experimental_read_coalescing_threshold_bytes_ =
spec->data_.experimental_read_coalescing_threshold_bytes;
driver->experimental_read_coalescing_merged_bytes_ =
spec->data_.experimental_read_coalescing_merged_bytes;
driver->experimental_read_coalescing_interval_ =
spec->data_.experimental_read_coalescing_interval;
driver->target_data_file_size_ = spec->data_.target_data_file_size;
std::optional<ReadCoalesceOptions> read_coalesce_options;
if (driver->experimental_read_coalescing_threshold_bytes_ ||
driver->experimental_read_coalescing_merged_bytes_ ||
driver->experimental_read_coalescing_interval_) {
read_coalesce_options.emplace();
read_coalesce_options->max_overhead_bytes_per_request =
static_cast<int64_t>(
driver->experimental_read_coalescing_threshold_bytes_
.value_or(0));
read_coalesce_options->max_merged_bytes_per_request =
static_cast<int64_t>(
driver->experimental_read_coalescing_merged_bytes_.value_or(
0));
read_coalesce_options->max_interval =
driver->experimental_read_coalescing_interval_.value_or(
absl::ZeroDuration());
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto config_state,
ConfigState::Make(spec->data_.config, supported_manifest_features,
spec->data_.assume_config));
driver->io_handle_ = internal_ocdbt::MakeIoHandle(
driver->data_copy_concurrency_, driver->cache_pool_->get(),
driver->base_,
driver->manifest_kvstore_.driver ? driver->manifest_kvstore_
: driver->base_,
std::move(config_state), driver->data_file_prefixes_,
driver->target_data_file_size_.value_or(kDefaultTargetBufferSize),
std::move(read_coalesce_options));
        driver->coordinator_ = spec->data_.coordinator;
if (!driver->coordinator_->address) {
driver->btree_writer_ =
MakeNonDistributedBtreeWriter(driver->io_handle_);
return driver;
}
DistributedBtreeWriterOptions options;
options.io_handle = driver->io_handle_;
options.coordinator_address = *driver->coordinator_->address;
options.security = driver->coordinator_->security;
if (!options.security) {
options.security = GetInsecureRpcSecurityMethod();
}
options.lease_duration = driver->coordinator_->lease_duration.value_or(
kDefaultLeaseDuration);
TENSORSTORE_ASSIGN_OR_RETURN(auto base_spec,
driver->base_.spec(MinimalSpec{}));
TENSORSTORE_ASSIGN_OR_RETURN(auto base_spec_json, base_spec.ToJson());
options.storage_identifier = base_spec_json.dump();
driver->btree_writer_ = MakeDistributedBtreeWriter(std::move(options));
return driver;
},
std::move(base_kvstore_future), std::move(manifest_kvstore_future));
}
absl::Status OcdbtDriverSpec::ApplyOptions(
kvstore::DriverSpecOptions&& options) {
if (options.minimal_spec) {
data_.config = {};
data_.assume_config = false;
}
return data_.base.driver.Set(std::move(options));
}
absl::Status OcdbtDriver::GetBoundSpecData(OcdbtDriverSpecData& spec) const {
TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver, base_.driver->GetBoundSpec());
spec.base.path = base_.path;
if (manifest_kvstore_.driver) {
auto& manifest_spec = spec.manifest.emplace();
    TENSORSTORE_ASSIGN_OR_RETURN(manifest_spec.driver,
                                 manifest_kvstore_.driver->GetBoundSpec());
manifest_spec.path = manifest_kvstore_.path;
}
spec.data_copy_concurrency = data_copy_concurrency_;
spec.cache_pool = cache_pool_;
spec.config = io_handle_->config_state->GetConstraints();
spec.assume_config = io_handle_->config_state->assume_config();
spec.data_file_prefixes = data_file_prefixes_;
spec.experimental_read_coalescing_threshold_bytes =
experimental_read_coalescing_threshold_bytes_;
spec.experimental_read_coalescing_merged_bytes =
experimental_read_coalescing_merged_bytes_;
spec.experimental_read_coalescing_interval =
experimental_read_coalescing_interval_;
spec.target_data_file_size = target_data_file_size_;
spec.coordinator = coordinator_;
return absl::Status();
}
kvstore::SupportedFeatures OcdbtDriver::GetSupportedFeatures(
const KeyRange& key_range) const {
return kvstore::SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
kvstore::SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
Future<kvstore::ReadResult> OcdbtDriver::Read(kvstore::Key key,
kvstore::ReadOptions options) {
ocdbt_metrics.read.Increment();
return internal_ocdbt::NonDistributedRead(io_handle_, std::move(key),
std::move(options));
}
void OcdbtDriver::ListImpl(kvstore::ListOptions options,
ListReceiver receiver) {
ocdbt_metrics.list.Increment();
return internal_ocdbt::NonDistributedList(io_handle_, std::move(options),
std::move(receiver));
}
Future<TimestampedStorageGeneration> OcdbtDriver::Write(
Key key, std::optional<Value> value, WriteOptions options) {
ocdbt_metrics.write.Increment();
return btree_writer_->Write(std::move(key), std::move(value),
std::move(options));
}
Future<const void> OcdbtDriver::DeleteRange(KeyRange range) {
ocdbt_metrics.delete_range.Increment();
return btree_writer_->DeleteRange(std::move(range));
}
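// Fast path: when the source is another OCDBT kvstore sharing this driver's
// base kvstore and a path under this driver's base path, the copy is
// performed structurally via BtreeWriter::CopySubtree on the source's root
// node, without re-reading or re-writing value data; any other source falls
// back to the generic kvstore::Driver implementation.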
Future<const void> OcdbtDriver::ExperimentalCopyRangeFrom(
const internal::OpenTransactionPtr& transaction, const KvStore& source,
std::string target_prefix, kvstore::CopyRangeOptions options) {
if (typeid(*source.driver) == typeid(OcdbtDriver)) {
auto& source_driver = static_cast<OcdbtDriver&>(*source.driver);
if (source.transaction != no_transaction) {
return absl::UnimplementedError("Source transactions not supported");
}
if (source_driver.base_.driver == base_.driver &&
absl::StartsWith(source_driver.base_.path, base_.path)) {
auto [promise, future] = PromiseFuturePair<void>::Make();
auto manifest_future =
source_driver.io_handle_->GetManifest(options.source_staleness_bound);
LinkValue(
[self = internal::IntrusivePtr<OcdbtDriver>(this),
target_prefix = std::move(target_prefix),
data_path_prefix =
source_driver.base_.path.substr(base_.path.size()),
source_range =
KeyRange::AddPrefix(source.path, options.source_range),
source_prefix_length = source.path.size(),
transaction = std::move(transaction)](
Promise<void> promise,
ReadyFuture<const ManifestWithTime> future) mutable {
auto& manifest_with_time = future.value();
if (!manifest_with_time.manifest) {
promise.SetResult(absl::OkStatus());
return;
}
auto& manifest = *manifest_with_time.manifest;
auto& latest_version = manifest.latest_version();
if (latest_version.root.location.IsMissing()) {
promise.SetResult(absl::OkStatus());
return;
}
BtreeWriter::CopySubtreeOptions copy_node_options;
copy_node_options.node = latest_version.root;
if (!data_path_prefix.empty()) {
auto& base_path =
copy_node_options.node.location.file_id.base_path;
internal::RefCountedStringWriter base_path_writer(
data_path_prefix.size() + base_path.size());
std::memcpy(base_path_writer.data(), data_path_prefix.data(),
data_path_prefix.size());
std::memcpy(base_path_writer.data() + data_path_prefix.size(),
base_path.data(), base_path.size());
base_path = std::move(base_path_writer);
}
copy_node_options.node_height = latest_version.root_height;
copy_node_options.range = std::move(source_range);
copy_node_options.strip_prefix_length = source_prefix_length;
copy_node_options.add_prefix = std::move(target_prefix);
LinkResult(std::move(promise),
transaction ? internal_ocdbt::AddCopySubtree(
&*self, *self->io_handle_, transaction,
std::move(copy_node_options))
: self->btree_writer_->CopySubtree(
std::move(copy_node_options)));
},
std::move(promise), std::move(manifest_future));
return std::move(future);
}
}
return kvstore::Driver::ExperimentalCopyRangeFrom(
transaction, source, std::move(target_prefix), std::move(options));
}
std::string OcdbtDriver::DescribeKey(std::string_view key) {
return tensorstore::StrCat(tensorstore::QuoteString(key),
" in OCDBT database at ",
io_handle_->DescribeLocation());
}
Result<KvStore> OcdbtDriver::GetBase(std::string_view path,
const Transaction& transaction) const {
return base_;
}
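// Atomic transactions on a non-distributed store (no coordinator address)
// are routed to the transactional b+tree writer; non-atomic transactions and
// coordinator-based configurations use the default kvstore::Driver path.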
absl::Status OcdbtDriver::ReadModifyWrite(
internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
ReadModifyWriteSource& source) {
if (!transaction || !transaction->atomic() || coordinator_->address) {
return kvstore::Driver::ReadModifyWrite(transaction, phase, std::move(key),
source);
}
return internal_ocdbt::AddReadModifyWrite(this, *io_handle_, transaction,
phase, std::move(key), source);
}
absl::Status OcdbtDriver::TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) {
if (!transaction->atomic() || coordinator_->address) {
return kvstore::Driver::TransactionalDeleteRange(transaction,
std::move(range));
}
return internal_ocdbt::AddDeleteRange(this, *io_handle_, transaction,
std::move(range));
}
}
}
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::internal_ocdbt::OcdbtDriverSpec>
registration;
} | #include "tensorstore/kvstore/ocdbt/driver.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <random>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/test_util.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::GetMap;
using ::tensorstore::internal_ocdbt::OcdbtDriver;
using ::tensorstore::internal_ocdbt::ReadManifest;
using ::tensorstore::ocdbt::CoordinatorServer;
class DistributedTest : public ::testing::Test {
protected:
CoordinatorServer coordinator_server_;
std::string coordinator_address_;
Context::Spec context_spec;
DistributedTest() {
::nlohmann::json security_json = ::nlohmann::json::value_t::discarded;
{
CoordinatorServer::Options options;
options.spec = CoordinatorServer::Spec::FromJson(
{{"bind_addresses", {"localhost:0"}},
{"security", security_json}})
.value();
TENSORSTORE_CHECK_OK_AND_ASSIGN(
coordinator_server_, CoordinatorServer::Start(std::move(options)));
}
assert(coordinator_server_.port() != 0);
coordinator_address_ =
tensorstore::StrCat("localhost:", coordinator_server_.port());
TENSORSTORE_CHECK_OK_AND_ASSIGN(
context_spec,
Context::Spec::FromJson({{"ocdbt_coordinator",
{{"address", coordinator_address_},
{"security", security_json}}}}));
}
};
TEST_F(DistributedTest, WriteSingleKey) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
auto& driver = static_cast<OcdbtDriver&>(*store.driver);
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "a", absl::Cord("value")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto manifest, ReadManifest(driver));
ASSERT_TRUE(manifest);
auto& version = manifest->latest_version();
EXPECT_EQ(2, version.generation_number);
EXPECT_FALSE(version.root.location.IsMissing());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("a", absl::Cord("value"))));
}
TEST_F(DistributedTest, WriteTwoKeys) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testb", absl::Cord("b")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("testa", absl::Cord("a")),
::testing::Pair("testb", absl::Cord("b"))));
}
TEST_F(DistributedTest, BasicFunctionality) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, BasicFunctionalityMinArity) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::kvstore::Open({{"driver", "ocdbt"},
{"base", "memory:
{"config", {{"max_decoded_node_bytes", 1}}}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, BasicFunctionalityMinArityNoInline) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::kvstore::Open({{"driver", "ocdbt"},
{"base", "memory:
{"config",
{
{"max_decoded_node_bytes", 1},
{"max_inline_value_bytes", 0},
}}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, TwoCooperators) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json base_kvs_store_spec{{"driver", "file"},
{"path", tempdir.path() + "/"}};
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store1, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK(kvstore::Write(store2, "testb", absl::Cord("b")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store1));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("testa", absl::Cord("a")),
::testing::Pair("testb", absl::Cord("b"))));
}
TEST_F(DistributedTest, MultipleCooperatorsManyWrites) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json base_kvs_store_spec{{"driver", "file"},
{"path", tempdir.path() + "/"}};
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
{"config", {{"max_decoded_node_bytes", 500}}},
};
constexpr size_t kNumCooperators = 3;
constexpr size_t kNumWrites = 30;
constexpr size_t kIterations = 5;
std::vector<kvstore::KvStore> stores;
for (size_t i = 0; i < kNumCooperators; ++i) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open(kvs_spec, Context(context_spec)).result());
stores.push_back(store);
}
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_OCDBT_DRIVER_TEST_SEED")};
for (size_t iter = 0; iter < kIterations; ++iter) {
std::vector<tensorstore::AnyFuture> write_futures;
for (size_t i = 0; i < kNumWrites; ++i) {
auto k = absl::Uniform<uint16_t>(gen);
write_futures.push_back(kvstore::Write(stores[i % kNumCooperators],
absl::StrFormat("%04x", k),
absl::Cord("a")));
}
for (auto& future : write_futures) {
TENSORSTORE_ASSERT_OK(future.status());
}
}
}
TEST_F(DistributedTest, TwoCooperatorsManifestDeleted) {
  ::nlohmann::json base_kvs_store_spec = "memory://";
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store1, "testa", absl::Cord("a")));
EXPECT_THAT(kvstore::Write(store2, "testb", absl::Cord("b")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST_F(DistributedTest, UnmodifiedNode) {
tensorstore::internal_ocdbt::TestUnmodifiedNode(Context(context_spec));
}
TEST_F(DistributedTest, ManifestDeleted) {
auto context = Context(context_spec);
  ::nlohmann::json base_kvs_store_spec = "memory://";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "ocdbt"}, {"base", base_kvs_store_spec}},
context)
.result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store, kvstore::Open(base_kvs_store_spec, context).result());
TENSORSTORE_ASSERT_OK(kvstore::Delete(base_store, "manifest.ocdbt"));
EXPECT_THAT(kvstore::Write(store, "testb", absl::Cord("b")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/driver.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/driver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
44ef2692-2606-4e6a-bfc5-981b4ffcf957 | cpp | google/tensorstore | kvs_backed_chunk_driver | tensorstore/driver/kvs_backed_chunk_driver.cc | tensorstore/driver/kvs_backed_chunk_driver_test.cc | #include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include <stddef.h>
#include <cassert>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/box_difference.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/async_initialized_cache_mixin.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/cache/kvs_backed_chunk_cache.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/staleness_bound.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/open_mode_spec.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/schema.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#ifndef TENSORSTORE_KVS_DRIVER_DEBUG
#define TENSORSTORE_KVS_DRIVER_DEBUG 0
#endif
namespace tensorstore {
namespace internal_kvs_backed_chunk_driver {
MetadataOpenState::~MetadataOpenState() = default;
DataCacheBase::~DataCacheBase() = default;
Result<IndexTransform<>> DataCacheBase::GetExternalToInternalTransform(
const void* metadata, size_t component_index) {
return IndexTransform<>();
}
MetadataOpenState::MetadataOpenState(Initializer initializer)
: PrivateOpenState{std::move(initializer.request.transaction),
std::move(initializer.request.batch),
std::move(initializer.spec),
initializer.request.read_write_mode} {
request_time_ = absl::Now();
}
std::string MetadataOpenState::GetMetadataCacheKey() { return {}; }
Result<kvstore::DriverPtr> MetadataOpenState::GetMetadataKeyValueStore(
kvstore::DriverPtr base_kv_store) {
return base_kv_store;
}
Result<kvstore::DriverPtr> OpenState::GetDataKeyValueStore(
kvstore::DriverPtr base_kv_store, const void* metadata) {
return base_kv_store;
}
ReadWriteMode MetadataOpenState::GetReadWriteMode(const void* metadata) {
return ReadWriteMode::read_write;
}
AtomicUpdateConstraint MetadataOpenState::GetCreateConstraint() {
return AtomicUpdateConstraint::kRequireMissing;
}
bool OpenState::DataCacheUsesMetadataCachePool(const void* metadata_ptr) {
return false;
}
MetadataCache::MetadataCache(Initializer initializer)
: Base(kvstore::DriverPtr()),
data_copy_concurrency_(std::move(initializer.data_copy_concurrency)),
metadata_cache_pool_(std::move(initializer.cache_pool)) {}
DataCacheBase::DataCacheBase(Initializer&& initializer)
: metadata_cache_entry_(std::move(initializer.metadata_cache_entry)),
initial_metadata_(std::move(initializer.metadata)),
cache_pool_(std::move(initializer.cache_pool)) {}
DataCache::DataCache(Initializer&& initializer,
internal::ChunkGridSpecification&& grid)
: KvsBackedChunkCache(std::move(initializer.store)),
ChunkedDataCacheBase(std::move(initializer)),
grid_(std::move(grid)) {}
namespace {
using MetadataPtr = std::shared_ptr<const void>;
const char invalid_metadata = 0;
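// Returns the error for a resize that would implicitly affect an output
// dimension the caller did not request to change.  An in-bounds affected
// interval points at the missing `resize_tied_bounds` option; reversed
// arguments indicate an out-of-bounds interval.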
absl::Status ShapeConstraintError(DimensionIndex output_dim,
DimensionIndex affected_inclusive_min,
DimensionIndex affected_exclusive_max) {
assert(affected_inclusive_min != affected_exclusive_max);
if (affected_inclusive_min < affected_exclusive_max) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Resize operation would also affect output dimension ", output_dim,
" over the interval ",
IndexInterval::UncheckedHalfOpen(affected_inclusive_min,
affected_exclusive_max),
" but `resize_tied_bounds` was not specified"));
}
return absl::FailedPreconditionError(tensorstore::StrCat(
"Resize operation would also affect output dimension ", output_dim,
" over the out-of-bounds interval ",
IndexInterval::UncheckedHalfOpen(affected_exclusive_max,
affected_inclusive_min)));
}
IndexInterval GetNewIndexInterval(IndexInterval existing,
Index new_inclusive_min,
Index new_exclusive_max) {
return IndexInterval::UncheckedHalfOpen(
ExplicitIndexOr(new_inclusive_min, existing.inclusive_min()),
ExplicitIndexOr(new_exclusive_max, existing.exclusive_max()));
}
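// Checks that every explicit (non-implicit) inclusive-min/exclusive-max
// constraint matches the corresponding bound of `current_domain`.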
absl::Status ValidateResizeDomainConstraint(
BoxView<> current_domain, span<const Index> inclusive_min_constraint,
span<const Index> exclusive_max_constraint) {
assert(current_domain.rank() == inclusive_min_constraint.size());
assert(current_domain.rank() == exclusive_max_constraint.size());
for (DimensionIndex i = 0; i < current_domain.rank(); ++i) {
const IndexInterval cur_interval = current_domain[i];
if (!ImplicitOrEqual(inclusive_min_constraint[i],
cur_interval.inclusive_min())) {
return ShapeConstraintError(i, cur_interval.inclusive_min(),
inclusive_min_constraint[i]);
}
if (!ImplicitOrEqual(exclusive_max_constraint[i],
cur_interval.exclusive_max())) {
return ShapeConstraintError(i, exclusive_max_constraint[i],
cur_interval.exclusive_max());
}
}
return absl::OkStatus();
}
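// Enforces `expand_only`/`shrink_only`: with `shrink_only` each new interval
// must be contained in the current one; with `expand_only` it must contain
// the current one.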
absl::Status ValidateExpandShrinkConstraints(
BoxView<> current_domain, span<const Index> new_inclusive_min,
span<const Index> new_exclusive_max, bool expand_only, bool shrink_only) {
assert(current_domain.rank() == new_inclusive_min.size());
assert(current_domain.rank() == new_exclusive_max.size());
for (DimensionIndex i = 0; i < current_domain.rank(); ++i) {
const IndexInterval cur_interval = current_domain[i];
const IndexInterval new_interval = GetNewIndexInterval(
cur_interval, new_inclusive_min[i], new_exclusive_max[i]);
if (shrink_only && !Contains(cur_interval, new_interval)) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Resize operation would expand output dimension ",
i, " from ", cur_interval, " to ", new_interval,
" but `shrink_only` was specified"));
}
if (expand_only && !Contains(new_interval, cur_interval)) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Resize operation would shrink output dimension ",
i, " from ", cur_interval, " to ", new_interval,
" but `expand_only` was specified"));
}
}
return absl::OkStatus();
}
std::string GetMetadataMissingErrorMessage(
MetadataCache::Entry* metadata_cache_entry) {
return tensorstore::StrCat(
"Metadata at ",
GetOwningCache(*metadata_cache_entry)
.kvstore_driver()
->DescribeKey(metadata_cache_entry->GetKeyValueStoreKey()),
" does not exist");
}
absl::Status ValidateNewMetadata(DataCacheBase* cache,
const void* new_metadata) {
if (!new_metadata) {
return absl::FailedPreconditionError(
GetMetadataMissingErrorMessage(cache->metadata_cache_entry_.get()));
}
auto* initial_metadata = cache->initial_metadata_.get();
if (initial_metadata != new_metadata) {
TENSORSTORE_RETURN_IF_ERROR(
cache->ValidateMetadataCompatibility(initial_metadata, new_metadata));
}
return absl::OkStatus();
}
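// Computes the effective metadata for a driver opened with
// `assume_cached_metadata`: cached metadata newer than the assumption time
// takes precedence over the assumed metadata, and any pending transactional
// updates are then applied on top.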
Result<MetadataPtr> GetUpdatedMetadataWithAssumeCachedMetadata(
KvsMetadataDriverBase& driver, DataCacheBase& cache,
internal::OpenTransactionPtr transaction) {
assert(driver.assumed_metadata_time_ != absl::InfiniteFuture() &&
driver.assumed_metadata_);
assert(&cache == driver.cache());
const auto handle_entry_or_node =
[&](auto& entry_or_node) -> Result<MetadataPtr> {
MetadataPtr new_metadata;
if (MetadataCache::ReadLock<void> lock(entry_or_node);
lock.stamp().time > driver.assumed_metadata_time_) {
new_metadata = lock.shared_data();
} else {
new_metadata = driver.assumed_metadata_;
}
if constexpr (std::is_same_v<absl::remove_cvref_t<decltype(entry_or_node)>,
MetadataCache::TransactionNode>) {
TENSORSTORE_ASSIGN_OR_RETURN(
new_metadata,
entry_or_node.GetUpdatedMetadata(std::move(new_metadata)),
          cache.metadata_cache_entry_->AnnotateError(_, /*reading=*/false));
}
return new_metadata;
};
if (transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node,
GetTransactionNode(*cache.metadata_cache_entry_, transaction));
return handle_entry_or_node(*node);
} else {
return handle_entry_or_node(*cache.metadata_cache_entry_);
}
}
Result<MetadataPtr> ValidateNewMetadata(
KvsMetadataDriverBase& driver, internal::OpenTransactionPtr transaction) {
MetadataPtr new_metadata;
auto& cache = *driver.cache();
if (driver.assumed_metadata_) {
if (driver.assumed_metadata_time_ == absl::InfiniteFuture()) {
return driver.assumed_metadata_;
}
TENSORSTORE_ASSIGN_OR_RETURN(new_metadata,
GetUpdatedMetadataWithAssumeCachedMetadata(
driver, cache, std::move(transaction)));
} else {
TENSORSTORE_ASSIGN_OR_RETURN(
new_metadata,
cache.metadata_cache_entry_->GetMetadata(std::move(transaction)));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateNewMetadata(&cache, new_metadata.get()));
return new_metadata;
}
Result<IndexTransform<>> GetInitialTransform(DataCacheBase* cache,
const void* metadata,
size_t component_index) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_transform, cache->GetExternalToInternalTransform(
cache->initial_metadata_.get(), component_index));
return ResolveBoundsFromMetadata(cache, metadata, component_index,
std::move(new_transform),
{});
}
}
void ChunkedDataCacheBase::GetComponentBounds(
const void* metadata, size_t component_index,
Box<dynamic_rank(kMaxRank)>& bounds, DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) {
const auto& grid = this->grid();
const auto& component_spec = grid.components[component_index];
const DimensionIndex component_rank = component_spec.rank();
bounds.set_rank(component_rank);
Box<dynamic_rank(kMaxRank)> grid_bounds(grid.chunk_shape.size());
DimensionSet grid_implicit_lower_bounds;
DimensionSet grid_implicit_upper_bounds;
this->GetChunkGridBounds(metadata, grid_bounds, grid_implicit_lower_bounds,
grid_implicit_upper_bounds);
span<const DimensionIndex> chunked_to_cell_dimensions =
component_spec.chunked_to_cell_dimensions;
bounds = component_spec.array_spec.overall_fill_value.domain();
implicit_lower_bounds = false;
implicit_upper_bounds = false;
for (DimensionIndex grid_dim = 0; grid_dim < grid_bounds.rank(); ++grid_dim) {
const DimensionIndex cell_dim = chunked_to_cell_dimensions[grid_dim];
bounds[cell_dim] = grid_bounds[grid_dim];
implicit_lower_bounds[cell_dim] = grid_implicit_lower_bounds[grid_dim];
implicit_upper_bounds[cell_dim] = grid_implicit_upper_bounds[grid_dim];
}
}
Result<ChunkLayout> ChunkedDataCacheBase::GetChunkLayout(
size_t component_index) {
return GetChunkLayoutFromMetadata(initial_metadata_.get(), component_index);
}
Future<IndexTransform<>> KvsMetadataDriverBase::ResolveBounds(
ResolveBoundsRequest request) {
return ResolveBounds(std::move(request), metadata_staleness_bound_);
}
Future<MetadataPtr> KvsMetadataDriverBase::ResolveMetadata(
internal::OpenTransactionPtr transaction,
absl::Time metadata_staleness_bound) {
if (assumed_metadata_ && assumed_metadata_time_ >= metadata_staleness_bound) {
return ValidateNewMetadata(*this, std::move(transaction));
}
auto* cache = this->cache();
if (transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node,
GetTransactionNode(*cache->metadata_cache_entry_, transaction));
auto read_future = node->Read({metadata_staleness_bound});
return MapFuture(
cache->executor(),
[cache = DataCacheBase::Ptr(cache), node = std::move(node)](
const Result<void>& result) -> Result<MetadataPtr> {
TENSORSTORE_RETURN_IF_ERROR(result);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_metadata, node->GetUpdatedMetadata(),
              cache->metadata_cache_entry_->AnnotateError(_,
                                                          /*reading=*/false));
TENSORSTORE_RETURN_IF_ERROR(
ValidateNewMetadata(cache.get(), new_metadata.get()));
return new_metadata;
},
std::move(read_future));
}
return MapFuture(
cache->executor(),
[cache = DataCacheBase::Ptr(cache)](
const Result<void>& result) -> Result<MetadataPtr> {
TENSORSTORE_RETURN_IF_ERROR(result);
auto new_metadata = cache->metadata_cache_entry_->GetMetadata();
TENSORSTORE_RETURN_IF_ERROR(
ValidateNewMetadata(cache.get(), new_metadata.get()));
return new_metadata;
},
cache->metadata_cache_entry_->Read({metadata_staleness_bound}));
}
Future<IndexTransform<>> KvsMetadataDriverBase::ResolveBounds(
ResolveBoundsRequest request, StalenessBound metadata_staleness_bound) {
auto* cache = this->cache();
return MapFutureValue(
cache->executor(),
[cache = DataCacheBase::Ptr(cache), component_index = component_index(),
options = std::move(request.options),
transform = std::move(request.transform)](
const MetadataPtr& new_metadata) mutable {
return ResolveBoundsFromMetadata(cache.get(), new_metadata.get(),
component_index, std::move(transform),
options);
},
ResolveMetadata(std::move(request.transaction),
metadata_staleness_bound.time));
}
namespace {
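// Requests an atomic metadata update that applies the resize described by
// `parameters`, re-validating metadata compatibility and the resize
// constraints against whatever metadata is current when the update runs.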
Future<const void> RequestResize(ChunkedDataCacheBase* cache,
internal::OpenTransactionPtr transaction,
ResizeParameters parameters) {
return cache->metadata_cache_entry_->RequestAtomicUpdate(
transaction,
[parameters = std::move(parameters),
cache = ChunkedDataCacheBase::Ptr(cache),
metadata_constraint = cache->initial_metadata_](
const MetadataCache::MetadataPtr& current_metadata)
-> Result<std::shared_ptr<const void>> {
if (!current_metadata) {
return absl::NotFoundError("Metadata was deleted");
}
if (metadata_constraint.get() != current_metadata.get()) {
TENSORSTORE_RETURN_IF_ERROR(cache->ValidateMetadataCompatibility(
metadata_constraint.get(), current_metadata.get()));
}
Box<dynamic_rank(kMaxRank)> bounds(parameters.new_inclusive_min.size());
DimensionSet implicit_lower_bounds;
DimensionSet implicit_upper_bounds;
cache->GetChunkGridBounds(current_metadata.get(), bounds,
implicit_lower_bounds, implicit_upper_bounds);
TENSORSTORE_RETURN_IF_ERROR(ValidateResizeConstraints(
bounds, parameters.new_inclusive_min, parameters.new_exclusive_max,
parameters.inclusive_min_constraint,
parameters.exclusive_max_constraint, parameters.expand_only,
parameters.shrink_only));
return cache->GetResizedMetadata(current_metadata.get(),
parameters.new_inclusive_min,
parameters.new_exclusive_max);
},
AtomicUpdateConstraint::kRequireExisting);
}
struct ResizeContinuation {
internal::IntrusivePtr<KvsMetadataDriverBase> driver;
internal::OpenTransactionPtr transaction;
size_t component_index;
IndexTransform<> transform;
Result<IndexTransform<>> GetResult() {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_metadata,
ValidateNewMetadata(*driver, std::move(transaction)));
return ResolveBoundsFromMetadata(driver->cache(), new_metadata.get(),
component_index, std::move(transform),
{});
}
void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
promise.SetResult(GetResult());
}
};
struct ResizeState {
internal::IntrusivePtr<KvsChunkedDriverBase> driver;
ChunkedDataCacheBase::Ptr cache;
internal::OpenTransactionPtr transaction;
size_t component_index;
IndexTransform<> transform;
ResizeParameters resize_parameters;
};
void SubmitResizeRequest(Promise<IndexTransform<>> promise, ResizeState state) {
auto* cache_ptr = state.cache.get();
LinkValue(
WithExecutor(cache_ptr->executor(),
ResizeContinuation{std::move(state.driver),
state.transaction, state.component_index,
std::move(state.transform)}),
std::move(promise),
RequestResize(cache_ptr, state.transaction,
std::move(state.resize_parameters)));
}
struct DeleteChunksForResizeContinuation {
std::unique_ptr<ResizeState> state;
void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
SubmitResizeRequest(std::move(promise), std::move(*state));
}
};
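// Deletes all chunks in the difference between the old and new chunk-grid
// regions, i.e. chunks that would fall outside the shrunken bounds.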
Future<const void> DeleteChunksForResize(
ChunkedDataCacheBase::Ptr cache, BoxView<> current_bounds,
span<const Index> new_inclusive_min, span<const Index> new_exclusive_max,
internal::OpenTransactionPtr transaction) {
span<const Index> chunk_shape = cache->grid().chunk_shape;
const DimensionIndex rank = chunk_shape.size();
assert(current_bounds.rank() == rank);
assert(new_inclusive_min.size() == rank);
assert(new_exclusive_max.size() == rank);
auto pair = PromiseFuturePair<void>::Make(MakeResult(absl::Status()));
pair.future.Force();
Box<dynamic_rank(internal::kNumInlinedDims)> current_grid_bounds(rank);
Box<dynamic_rank(internal::kNumInlinedDims)> new_grid_bounds(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
const IndexInterval cur_dim_bounds = current_bounds[i];
const IndexInterval new_dim_bounds = IndexInterval::UncheckedHalfOpen(
ExplicitIndexOr(new_inclusive_min[i], cur_dim_bounds.inclusive_min()),
ExplicitIndexOr(new_exclusive_max[i], cur_dim_bounds.exclusive_max()));
const Index chunk_size = chunk_shape[i];
current_grid_bounds[i] = DividePositiveRoundOut(cur_dim_bounds, chunk_size);
new_grid_bounds[i] = DividePositiveRoundOut(new_dim_bounds, chunk_size);
}
internal::BoxDifference box_difference(current_grid_bounds, new_grid_bounds);
Box<dynamic_rank(internal::kNumInlinedDims)> part(rank);
for (Index box_i = 0; box_i < box_difference.num_sub_boxes(); ++box_i) {
box_difference.GetSubBox(box_i, part);
IterateOverIndexRange(part, [&](span<const Index> cell_indices) {
LinkError(pair.promise, cache->DeleteCell(cell_indices, transaction));
});
}
return pair.future;
}
struct ResolveBoundsForDeleteAndResizeContinuation {
std::unique_ptr<ResizeState> state;
void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
std::shared_ptr<const void> new_metadata;
if (auto result = ValidateNewMetadata(*state->driver, state->transaction);
result.ok()) {
new_metadata = *std::move(result);
} else {
promise.SetResult(std::move(result).status());
return;
}
const DimensionIndex grid_rank = state->cache->grid().chunk_shape.size();
assert(!state->resize_parameters.expand_only);
Box<dynamic_rank(internal::kNumInlinedDims)> bounds(grid_rank);
DimensionSet implicit_lower_bounds;
DimensionSet implicit_upper_bounds;
state->cache->GetChunkGridBounds(new_metadata.get(), bounds,
implicit_lower_bounds,
implicit_upper_bounds);
if (auto status = ValidateResizeConstraints(
bounds, state->resize_parameters.new_inclusive_min,
state->resize_parameters.new_exclusive_max,
state->resize_parameters.inclusive_min_constraint,
state->resize_parameters.exclusive_max_constraint,
            /*expand_only=*/false,
state->resize_parameters.shrink_only);
!status.ok()) {
promise.SetResult(std::move(status));
return;
}
auto* state_ptr = state.get();
LinkValue(
WithExecutor(state_ptr->cache->executor(),
DeleteChunksForResizeContinuation{std::move(state)}),
std::move(promise),
DeleteChunksForResize(state_ptr->cache, bounds,
state_ptr->resize_parameters.new_inclusive_min,
state_ptr->resize_parameters.new_exclusive_max,
state_ptr->transaction));
}
};
}
Result<ChunkLayout> KvsChunkedDriverBase::GetChunkLayout(
IndexTransformView<> transform) {
auto* cache = this->cache();
return cache->GetChunkLayoutFromMetadata(cache->initial_metadata().get(),
component_index()) |
transform;
}
Future<IndexTransform<>> KvsChunkedDriverBase::Resize(
internal::Driver::ResizeRequest request) {
if (assumed_metadata_time_ == absl::InfiniteFuture()) {
return absl::InvalidArgumentError(
"Resize not supported because assume_metadata was specified");
}
auto* cache = this->cache();
auto resize_parameters = GetResizeParameters(
cache, cache->initial_metadata_.get(), component_index(),
request.transform, request.inclusive_min, request.exclusive_max,
request.options,
request.transaction ? request.transaction->mode()
: TransactionMode::no_transaction_mode);
if (!resize_parameters) {
if (resize_parameters.status().code() == absl::StatusCode::kAborted) {
return ResolveBounds(
{std::move(request.transaction), std::move(request.transform)},
{});
}
return resize_parameters.status();
}
auto pair = PromiseFuturePair<IndexTransform<>>::Make();
ResizeState resize_state{
internal::IntrusivePtr<KvsChunkedDriverBase>(this),
ChunkedDataCacheBase::Ptr(cache),
std::move(request.transaction),
component_index(),
std::move(request.transform),
*std::move(resize_parameters),
};
if ((request.options.mode & resize_metadata_only) == resize_metadata_only ||
(request.options.mode & expand_only) == expand_only) {
SubmitResizeRequest(std::move(pair.promise), std::move(resize_state));
} else {
LinkValue(WithExecutor(
cache->executor(),
ResolveBoundsForDeleteAndResizeContinuation{
std::make_unique<ResizeState>(std::move(resize_state))}),
std::move(pair.promise),
cache->metadata_cache_entry_->Read({absl::Now()}));
}
return std::move(pair.future);
}
Result<IndexTransform<>> KvsMetadataDriverBase::GetBoundSpecData(
internal::OpenTransactionPtr transaction, KvsDriverSpec& spec,
IndexTransformView<> transform_view) {
auto* cache = this->cache();
auto* metadata_cache = cache->metadata_cache();
TENSORSTORE_ASSIGN_OR_RETURN(spec.store.driver,
metadata_cache->base_store()->GetBoundSpec());
spec.store.path = cache->GetBaseKvstorePath();
spec.data_copy_concurrency = metadata_cache->data_copy_concurrency_;
spec.cache_pool = cache->cache_pool_;
if (spec.cache_pool != metadata_cache->metadata_cache_pool_) {
spec.metadata_cache_pool = metadata_cache->metadata_cache_pool_;
}
spec.delete_existing = false;
spec.open = true;
spec.create = false;
spec.assume_metadata = assumed_metadata_time_ == absl::InfiniteFuture();
spec.staleness.metadata = this->metadata_staleness_bound();
spec.staleness.data = this->data_staleness_bound();
spec.schema.Set(RankConstraint{this->rank()}).IgnoreError();
spec.schema.Set(this->dtype()).IgnoreError();
TENSORSTORE_ASSIGN_OR_RETURN(
auto validated_metadata,
ValidateNewMetadata(*this, std::move(transaction)));
TENSORSTORE_RETURN_IF_ERROR(cache->GetBoundSpecData(
spec, validated_metadata.get(), this->component_index()));
IndexTransform<> transform(transform_view);
TENSORSTORE_ASSIGN_OR_RETURN(
auto external_to_internal_transform,
cache->GetExternalToInternalTransform(validated_metadata.get(),
component_index()));
if (external_to_internal_transform.valid()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto internal_to_external_transform,
InverseTransform(external_to_internal_transform));
TENSORSTORE_ASSIGN_OR_RETURN(
transform,
ComposeTransforms(internal_to_external_transform, transform));
}
return transform;
}
absl::Status KvsDriverSpec::ApplyOptions(SpecOptions&& options) {
if (options.recheck_cached_data.specified()) {
staleness.data = StalenessBound(options.recheck_cached_data);
}
if (options.recheck_cached_metadata.specified()) {
staleness.metadata = StalenessBound(options.recheck_cached_metadata);
}
if (options.kvstore.valid()) {
if (store.valid()) {
return absl::InvalidArgumentError("\"kvstore\" is already specified");
}
store = std::move(options.kvstore);
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(static_cast<Schema&&>(options)));
return OpenModeSpec::ApplyOptions(options);
}
OpenMode KvsDriverSpec::open_mode() const {
auto mode = this->OpenModeSpec::open_mode();
return (mode == OpenMode{}) ? OpenMode::open : mode;
}
kvstore::Spec KvsDriverSpec::GetKvstore() const { return store; }
KvStore KvsMetadataDriverBase::GetKvstore(const Transaction& transaction) {
auto* cache = this->cache();
auto* metadata_cache = cache->metadata_cache();
return KvStore{kvstore::DriverPtr(metadata_cache->base_store()),
cache->GetBaseKvstorePath(), transaction};
}
namespace {
Result<size_t> ValidateOpenRequest(OpenState* state, const void* metadata) {
auto& base = *(PrivateOpenState*)state;
if (!metadata) {
return absl::NotFoundError(
GetMetadataMissingErrorMessage(base.metadata_cache_entry_.get()));
}
return state->GetComponentIndex(metadata, base.spec_->open_mode());
}
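// Builds the driver handle once metadata is available: obtains or creates
// the data cache, resolves the initial transform, and, for transactional
// opens without assume_metadata, queues a validation update that aborts the
// transaction if the metadata changes incompatibly before commit.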
Result<internal::Driver::Handle> CreateTensorStoreFromMetadata(
OpenState::Ptr state, std::shared_ptr<const void> metadata,
size_t component_index) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "CreateTensorStoreFromMetadata: state=" << state.get();
auto& base = *(PrivateOpenState*)state.get();
auto read_write_mode = state->GetReadWriteMode(metadata.get());
if (base.read_write_mode_ != ReadWriteMode::dynamic) {
TENSORSTORE_RETURN_IF_ERROR(internal::ValidateSupportsModes(
read_write_mode, base.read_write_mode_));
read_write_mode = base.read_write_mode_;
}
std::string chunk_cache_identifier;
bool data_cache_uses_metadata_cache_pool =
state->DataCacheUsesMetadataCachePool(metadata.get());
if (!base.metadata_cache_key_.empty()) {
auto data_cache_key = state->GetDataCacheKey(metadata.get());
if (!data_cache_key.empty()) {
internal::EncodeCacheKey(&chunk_cache_identifier, data_cache_key,
base.metadata_cache_entry_.get(),
state->cache_pool()->get());
}
}
absl::Status data_key_value_store_status;
const auto& state_ref = *state;
auto data_cache = internal::GetCacheWithExplicitTypeInfo<DataCacheBase>(
(data_cache_uses_metadata_cache_pool
? GetOwningCache(*base.metadata_cache_entry_).pool()
: state->cache_pool()->get()),
typeid(state_ref), chunk_cache_identifier,
[&]() -> std::unique_ptr<DataCacheBase> {
auto store_result = state->GetDataKeyValueStore(
GetOwningCache(*base.metadata_cache_entry_).base_store_,
metadata.get());
if (!store_result) {
data_key_value_store_status = std::move(store_result).status();
return nullptr;
}
DataCacheInitializer initializer;
initializer.store = *std::move(store_result);
initializer.metadata_cache_entry = base.metadata_cache_entry_;
initializer.metadata = metadata;
initializer.cache_pool = state->cache_pool();
return state->GetDataCache(std::move(initializer));
});
TENSORSTORE_RETURN_IF_ERROR(data_key_value_store_status);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_transform,
GetInitialTransform(data_cache.get(), metadata.get(), component_index));
if (base.transaction_ &&
!(base.spec_->assume_metadata || base.spec_->assume_cached_metadata)) {
data_cache->metadata_cache_entry_
->RequestAtomicUpdate(
base.transaction_,
[data_cache = data_cache, transform = new_transform,
component_index](
const MetadataCache::MetadataPtr& existing_metadata)
-> Result<MetadataCache::MetadataPtr> {
TENSORSTORE_RETURN_IF_ERROR(ValidateNewMetadata(
data_cache.get(), existing_metadata.get()));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_transform,
GetInitialTransform(data_cache.get(), existing_metadata.get(),
component_index));
if (transform != new_transform) {
return absl::AbortedError("Metadata is inconsistent");
}
return existing_metadata;
},
AtomicUpdateConstraint::kRequireExisting)
.IgnoreFuture();
}
DriverInitializer initializer;
initializer.cache = std::move(data_cache);
initializer.component_index = component_index;
initializer.data_staleness_bound =
base.spec_->staleness.data.BoundAtOpen(base.request_time_);
internal::ReadWritePtr<KvsMetadataDriverBase> driver(
state->AllocateDriver(std::move(initializer)), read_write_mode);
driver->metadata_staleness_bound_ =
base.spec_->staleness.metadata.BoundAtOpen(base.request_time_);
if (base.spec_->assume_metadata || base.spec_->assume_cached_metadata) {
driver->assumed_metadata_ = metadata;
driver->assumed_metadata_time_ = base.spec_->assume_cached_metadata
? base.request_time_
: absl::InfiniteFuture();
}
return internal::Driver::Handle{
std::move(driver), std::move(new_transform),
internal::TransactionState::ToTransaction(std::move(base.transaction_))};
}
struct HandleWroteMetadata {
MetadataOpenState::Ptr state;
void operator()(Promise<internal::Driver::Handle> promise,
ReadyFuture<const void> future) {
auto& base = *(PrivateOpenState*)state.get();
auto& result = future.result();
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "HandleWroteMetadata: state=" << state.get()
<< ", status=" << result.status();
if (!result) {
if (result.status().code() != absl::StatusCode::kAlreadyExists ||
!base.spec_->open) {
promise.SetResult(result.status());
return;
}
}
promise.SetResult([&]() -> Result<internal::Driver::Handle> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto metadata,
base.metadata_cache_entry_->GetMetadata(base.transaction_));
return state->CreateDriverHandleFromMetadata(std::move(metadata));
}());
}
};
void CreateMetadata(MetadataOpenState::Ptr state,
Promise<internal::Driver::Handle> promise) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "CreateMetadata: state=" << state.get();
auto state_ptr = state.get();
auto& base = *(PrivateOpenState*)state.get();
internal::OpenTransactionPtr transaction = base.transaction_;
auto state_copy = state;
Link(WithExecutor(state_ptr->executor(),
HandleWroteMetadata{std::move(state)}),
std::move(promise),
base.metadata_cache_entry_->RequestAtomicUpdate(
transaction,
[state = std::move(state_copy)](
const MetadataCache::MetadataPtr& existing_metadata)
-> Result<MetadataCache::MetadataPtr> {
return state->Create(existing_metadata.get(), {});
},
state_ptr->GetCreateConstraint(), base.request_time_));
}
struct HandleReadMetadata {
MetadataOpenState::Ptr state;
void operator()(Promise<internal::Driver::Handle> promise,
ReadyFuture<const void> metadata_future) {
auto& base = *(PrivateOpenState*)state.get();
std::shared_ptr<const void> metadata;
if (auto result =
base.metadata_cache_entry_->GetMetadata(base.transaction_);
result.ok()) {
metadata = *std::move(result);
} else {
promise.SetResult(std::move(result).status());
return;
}
auto handle_result = state->CreateDriverHandleFromMetadata(metadata);
if (handle_result) {
promise.SetResult(std::move(handle_result));
return;
}
if (handle_result.status().code() == absl::StatusCode::kNotFound) {
if (base.spec_->create) {
CreateMetadata(std::move(state), std::move(promise));
return;
}
}
promise.SetResult(std::move(handle_result).status());
}
};
struct GetMetadataForOpen {
MetadataOpenState::Ptr state;
void operator()(Promise<internal::Driver::Handle> promise) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "GetMetadataForOpen: state=" << state.get();
auto& base = *(PrivateOpenState*)state.get();
auto state_ptr = state.get();
auto batch = std::move(base.batch_);
if (base.spec_->open) {
if (base.spec_->assume_metadata || base.spec_->assume_cached_metadata) {
TENSORSTORE_ASSIGN_OR_RETURN(
            auto metadata, state->Create(nullptr, {/*assume_metadata=*/true}),
static_cast<void>(promise.SetResult(_)));
promise.SetResult(
state->CreateDriverHandleFromMetadata(std::move(metadata)));
return;
}
LinkValue(
WithExecutor(state_ptr->executor(),
HandleReadMetadata{std::move(state)}),
std::move(promise),
base.metadata_cache_entry_->Read(
{base.spec_->staleness.metadata.BoundAtOpen(base.request_time_)
.time,
batch}));
return;
}
assert(base.spec_->create);
CreateMetadata(std::move(state), std::move(promise));
}
};
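// Invoked once the metadata kvstore is open.  Handles `delete_existing` by
// deleting everything under the driver's prefix (transactionally if a
// transaction is bound) before reading or creating metadata.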
struct HandleKeyValueStoreReady {
MetadataOpenState::Ptr state;
void operator()(Promise<internal::Driver::Handle> promise,
ReadyFuture<const void> store) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "Metadata kvstore ready: state=" << state.get();
auto& base = *(PrivateOpenState*)state.get();
auto* state_ptr = state.get();
if (base.spec_->delete_existing) {
KeyRange range_to_delete =
KeyRange::Prefix(state->GetPrefixForDeleteExisting());
auto* kvstore =
GetOwningCache(*base.metadata_cache_entry_).base_store_.get();
if (!base.transaction_) {
LinkValue(std::bind(WithExecutor(state_ptr->executor(),
GetMetadataForOpen{std::move(state)}),
std::placeholders::_1),
std::move(promise),
kvstore->DeleteRange(std::move(range_to_delete)));
return;
}
if (auto status = kvstore->TransactionalDeleteRange(
base.transaction_, std::move(range_to_delete));
!status.ok()) {
promise.SetResult(status);
return;
}
base.transaction_->Barrier();
}
GetMetadataForOpen{std::move(state)}(std::move(promise));
}
};
}
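// Queues `update` as a pending write on this metadata entry.  For implicit
// transactions the returned future resolves when the write commits;
// otherwise a read at `read_time` is issued if requested, or an
// already-ready future is returned.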
Future<const void> MetadataCache::Entry::RequestAtomicUpdate(
const internal::OpenTransactionPtr& transaction, UpdateFunction update,
AtomicUpdateConstraint update_constraint,
std::optional<absl::Time> read_time) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetWriteLockedTransactionNode(*this, transaction));
node->updated_metadata_base_state_ =
internal::UnownedToShared(&invalid_metadata);
node->updated_metadata_ = nullptr;
if (node->transaction()->implicit_transaction()) {
auto [promise, future] = PromiseFuturePair<void>::Make();
node->AddPendingWrite(
PendingWrite{std::move(update), update_constraint, promise});
LinkError(std::move(promise), node.unlock()->transaction()->future());
return std::move(future);
}
node->AddPendingWrite(PendingWrite{std::move(update), update_constraint});
if (read_time) {
return node->Read({*read_time});
}
return MakeReadyFuture();
}
Result<MetadataCache::MetadataPtr> MetadataCache::Entry::GetMetadata(
internal::OpenTransactionPtr transaction) {
if (!transaction) return GetMetadata();
TENSORSTORE_ASSIGN_OR_RETURN(auto node,
GetTransactionNode(*this, transaction));
TENSORSTORE_ASSIGN_OR_RETURN(auto metadata, node->GetUpdatedMetadata(),
                               this->AnnotateError(_, /*reading=*/false));
return metadata;
}
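// Applies the pending update functions in order on top of `metadata`,
// asserting each update's constraint; the result is memoized against the
// base metadata pointer so repeated calls with the same base are cheap.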
Result<MetadataCache::MetadataPtr>
MetadataCache::TransactionNode::GetUpdatedMetadata(MetadataPtr metadata) {
UniqueWriterLock lock(*this);
if (this->updated_metadata_base_state_ == metadata) {
return this->updated_metadata_;
}
this->updated_metadata_base_state_ = metadata;
for (const auto& request : this->pending_writes) {
auto result = request.update(metadata);
if (result) {
assert(*result);
assert(request.update_constraint !=
AtomicUpdateConstraint::kRequireMissing ||
metadata == nullptr);
assert(request.update_constraint !=
AtomicUpdateConstraint::kRequireExisting ||
metadata != nullptr);
metadata = std::move(*result);
if (!request.promise.null()) {
request.promise.raw_result() = MakeResult();
}
} else {
if (!request.promise.null()) {
request.promise.raw_result() = GetOwningEntry(*this).AnnotateError(
            result.status(), /*reading=*/false);
} else {
this->updated_metadata_ = result.status();
return std::move(result).status();
}
}
}
this->updated_metadata_ = metadata;
return metadata;
}
Result<MetadataCache::MetadataPtr>
MetadataCache::TransactionNode::GetUpdatedMetadata() {
auto metadata = ReadLock<void>(*this).shared_data();
return GetUpdatedMetadata(std::move(metadata));
}
void MetadataCache::Entry::DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) {
GetOwningCache(*this).executor()([this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
MetadataPtr new_metadata;
if (value) {
if (auto result = GetOwningCache(*this).DecodeMetadata(this->key(),
*std::move(value));
result.ok()) {
new_metadata = *std::move(result);
} else {
execution::set_error(
receiver, internal::ConvertInvalidArgumentToFailedPrecondition(
std::move(result).status()));
return;
}
}
execution::set_value(receiver, std::move(new_metadata));
});
}
std::string MetadataCache::Entry::GetKeyValueStoreKey() {
return GetOwningCache(*this).GetMetadataStorageKey(this->key());
}
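// Commits pending metadata updates: reads metadata subject to the staleness
// bound, applies the queued updates, and marks the generation dirty only if
// the metadata actually changed.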
void MetadataCache::TransactionNode::DoApply(ApplyOptions options,
ApplyReceiver receiver) {
if (this->pending_writes.empty() &&
options.apply_mode != ApplyOptions::kSpecifyUnchanged) {
execution::set_value(
receiver, ReadState{{}, TimestampedStorageGeneration::Unconditional()});
return;
}
auto continuation = [this, receiver = std::move(receiver)](
ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
return execution::set_error(receiver, future.result().status());
}
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Apply metadata";
auto read_state = AsyncCache::ReadLock<void>(*this).read_state();
std::shared_ptr<const void> new_data;
if (auto result = this->GetUpdatedMetadata(read_state.data); result.ok()) {
new_data = *std::move(result);
} else {
execution::set_error(receiver, std::move(result).status());
return;
}
if (new_data != read_state.data) {
read_state.stamp.generation.MarkDirty();
read_state.data = std::move(new_data);
}
execution::set_value(receiver, std::move(read_state));
};
this->Read({options.staleness_bound})
.ExecuteWhenReady(WithExecutor(GetOwningCache(*this).executor(),
std::move(continuation)));
}
void MetadataCache::TransactionNode::InvalidateReadState() {
Base::TransactionNode::InvalidateReadState();
this->updated_metadata_base_state_ =
internal::UnownedToShared(&invalid_metadata);
this->updated_metadata_ = nullptr;
}
void MetadataCache::Entry::DoEncode(std::shared_ptr<const void> data,
EncodeReceiver receiver) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Encoding metadata";
auto& entry = GetOwningEntry(*this);
auto& cache = GetOwningCache(entry);
if (auto encoded_result = cache.EncodeMetadata(entry.key(), data.get());
encoded_result.ok()) {
execution::set_value(receiver, *std::move(encoded_result));
} else {
execution::set_error(receiver, std::move(encoded_result).status());
}
}
Future<const void> DataCache::DeleteCell(
span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction) {
return internal::ChunkCache::DeleteCell(grid_cell_indices,
std::move(transaction));
}
namespace {
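// Returns the shared metadata cache for this open request, creating and
// asynchronously initializing it (by opening the metadata kvstore) if
// needed.  The cache key combines the base kvstore spec, the open-state
// type, and the driver-specific metadata cache key.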
internal::CachePtr<MetadataCache> GetOrCreateMetadataCache(
MetadataOpenState* state) {
auto& base = *(PrivateOpenState*)state;
auto& spec = *base.spec_;
internal::EncodeCacheKey(&base.metadata_cache_key_, spec.store.driver,
typeid(*state), state->GetMetadataCacheKey());
return internal::GetOrCreateAsyncInitializedCache<MetadataCache>(
state->metadata_cache_pool()->get(), base.metadata_cache_key_,
[&] {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "Creating metadata cache: open_state=" << state;
return state->GetMetadataCache(
{base.spec_->data_copy_concurrency, state->metadata_cache_pool()});
},
[&](Promise<void> initialized,
internal::CachePtr<MetadataCache> metadata_cache) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "Opening metadata kvstore: open_state=" << state;
LinkValue(
[state = MetadataOpenState::Ptr(state),
metadata_cache = std::move(metadata_cache)](
Promise<void> metadata_cache_promise,
ReadyFuture<kvstore::DriverPtr> future) {
metadata_cache->base_store_ = *future.result();
if (auto result = state->GetMetadataKeyValueStore(
metadata_cache->base_store_);
result.ok()) {
metadata_cache->SetKvStoreDriver(*std::move(result));
} else {
metadata_cache_promise.SetResult(std::move(result).status());
}
},
initialized, kvstore::Open(spec.store.driver));
});
}
}
Result<internal::Driver::Handle> OpenState::CreateDriverHandleFromMetadata(
std::shared_ptr<const void> metadata) {
TENSORSTORE_ASSIGN_OR_RETURN(size_t component_index,
ValidateOpenRequest(this, metadata.get()));
return CreateTensorStoreFromMetadata(OpenState::Ptr(this),
std::move(metadata), component_index);
}
Future<internal::Driver::Handle> OpenDriver(MetadataOpenState::Ptr state) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "OpenDriver: open_state=" << state.get();
auto& base = *(PrivateOpenState*)state.get();
auto& spec = *base.spec_;
TENSORSTORE_RETURN_IF_ERROR(
spec.OpenModeSpec::Validate(base.read_write_mode_));
if (!spec.store.valid()) {
return absl::InvalidArgumentError("\"kvstore\" must be specified");
}
auto* state_ptr = state.get();
auto metadata_cache = GetOrCreateMetadataCache(state_ptr);
base.metadata_cache_entry_ =
GetCacheEntry(metadata_cache, state->GetMetadataCacheEntryKey());
return PromiseFuturePair<internal::Driver::Handle>::LinkValue(
HandleKeyValueStoreReady{std::move(state)},
metadata_cache->initialized_)
.future;
}
Result<IndexTransform<>> ResolveBoundsFromMetadata(
DataCacheBase* data_cache, const void* new_metadata, size_t component_index,
IndexTransform<> transform, ResolveBoundsOptions options) {
DimensionSet base_implicit_lower_bounds;
DimensionSet base_implicit_upper_bounds;
Box<dynamic_rank(kMaxRank)> base_bounds;
data_cache->GetComponentBounds(new_metadata, component_index, base_bounds,
base_implicit_lower_bounds,
base_implicit_upper_bounds);
if ((options.mode & fix_resizable_bounds) == fix_resizable_bounds) {
base_implicit_lower_bounds = false;
base_implicit_upper_bounds = false;
}
return PropagateBoundsToTransform(
BoxView<>(base_bounds), base_implicit_lower_bounds,
base_implicit_upper_bounds, std::move(transform));
}
absl::Status ValidateResizeConstraints(
BoxView<> current_domain, span<const Index> new_inclusive_min,
span<const Index> new_exclusive_max,
span<const Index> inclusive_min_constraint,
span<const Index> exclusive_max_constraint, bool expand_only,
bool shrink_only) {
TENSORSTORE_RETURN_IF_ERROR(ValidateResizeDomainConstraint(
current_domain, inclusive_min_constraint, exclusive_max_constraint));
TENSORSTORE_RETURN_IF_ERROR(ValidateExpandShrinkConstraints(
current_domain, new_inclusive_min, new_exclusive_max, expand_only,
shrink_only));
return absl::OkStatus();
}
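// Translates a resize of the external index space into chunk-grid
// parameters: propagates the input-domain resize to output dimensions,
// rejects changes to explicit (non-resizable) bounds, and for
// atomic_isolated transactions that may delete chunks pins the current
// bounds as constraints.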
Result<ResizeParameters> GetResizeParameters(
ChunkedDataCacheBase* data_cache, const void* metadata,
size_t component_index, IndexTransformView<> transform,
span<const Index> inclusive_min, span<const Index> exclusive_max,
ResizeOptions options, TransactionMode transaction_mode) {
assert(transform.input_rank() == inclusive_min.size());
assert(transform.input_rank() == exclusive_max.size());
const DimensionIndex output_rank = transform.output_rank();
DimensionSet base_implicit_lower_bounds;
DimensionSet base_implicit_upper_bounds;
Box<dynamic_rank(kMaxRank)> base_bounds;
data_cache->GetComponentBounds(metadata, component_index, base_bounds,
base_implicit_lower_bounds,
base_implicit_upper_bounds);
const auto& grid = data_cache->grid();
const DimensionIndex grid_rank = grid.grid_rank();
Index new_output_inclusive_min[kMaxRank];
Index new_output_exclusive_max[kMaxRank];
Index output_inclusive_min_constraint[kMaxRank];
Index output_exclusive_max_constraint[kMaxRank];
bool is_noop;
TENSORSTORE_RETURN_IF_ERROR(PropagateInputDomainResizeToOutput(
transform, inclusive_min, exclusive_max,
(options.mode & resize_tied_bounds) ==
resize_tied_bounds,
{&output_inclusive_min_constraint[0], output_rank},
{&output_exclusive_max_constraint[0], output_rank},
{&new_output_inclusive_min[0], output_rank},
{&new_output_exclusive_max[0], output_rank}, &is_noop));
if (is_noop) return absl::AbortedError("");
if (grid.components.size() != 1 && !(options.mode & resize_tied_bounds)) {
return absl::FailedPreconditionError(
"Resize operation would affect other fields but "
"`resize_tied_bounds` was not specified");
}
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const IndexInterval dim_bounds = base_bounds[output_dim];
if (!base_implicit_lower_bounds[output_dim]) {
const Index min_constraint = output_inclusive_min_constraint[output_dim];
if (!ImplicitOrEqual(min_constraint, dim_bounds.inclusive_min())) {
return ShapeConstraintError(output_dim, dim_bounds.inclusive_min(),
min_constraint);
}
const Index new_inclusive_min = new_output_inclusive_min[output_dim];
if (!ImplicitOrEqual(new_inclusive_min, dim_bounds.inclusive_min())) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Cannot change inclusive lower bound of output dimension ",
output_dim, ", which is fixed at ", dim_bounds.inclusive_min(),
", to ", new_inclusive_min));
}
}
if (!base_implicit_upper_bounds[output_dim]) {
const Index max_constraint = output_exclusive_max_constraint[output_dim];
if (!ImplicitOrEqual(max_constraint, dim_bounds.exclusive_max())) {
return ShapeConstraintError(output_dim, max_constraint,
dim_bounds.exclusive_max());
}
const Index new_exclusive_max = new_output_exclusive_max[output_dim];
if (!ImplicitOrEqual(new_exclusive_max, dim_bounds.exclusive_max())) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Cannot change exclusive upper bound of output dimension ",
output_dim, ", which is fixed at ", dim_bounds.exclusive_max(),
", to ", new_exclusive_max));
}
}
if (transaction_mode == TransactionMode::atomic_isolated &&
!(options.mode & resize_metadata_only) &&
!(options.mode & expand_only)) {
output_inclusive_min_constraint[output_dim] = dim_bounds.inclusive_min();
output_exclusive_max_constraint[output_dim] = dim_bounds.exclusive_max();
}
}
span<const DimensionIndex> chunked_to_cell_dimensions =
grid.components[component_index].chunked_to_cell_dimensions;
std::vector<Index> new_grid_inclusive_min(grid_rank);
std::vector<Index> new_grid_exclusive_max(grid_rank);
std::vector<Index> grid_inclusive_min_constraint(grid_rank);
std::vector<Index> grid_exclusive_max_constraint(grid_rank);
for (DimensionIndex i = 0; i < grid_rank; ++i) {
const DimensionIndex j = chunked_to_cell_dimensions[i];
new_grid_inclusive_min[i] = new_output_inclusive_min[j];
new_grid_exclusive_max[i] = new_output_exclusive_max[j];
grid_inclusive_min_constraint[i] = output_inclusive_min_constraint[j];
grid_exclusive_max_constraint[i] = output_exclusive_max_constraint[j];
}
return ResizeParameters{
new_grid_inclusive_min,
new_grid_exclusive_max,
grid_inclusive_min_constraint,
grid_exclusive_max_constraint,
(options.mode & expand_only) == expand_only,
(options.mode & shrink_only) == shrink_only};
}
void KvsMetadataDriverBase::GarbageCollectionBase::Visit(
garbage_collection::GarbageCollectionVisitor& visitor,
const KvsMetadataDriverBase& value) {
auto* cache = value.cache();
auto* metadata_cache = cache->metadata_cache();
garbage_collection::GarbageCollectionVisit(visitor,
*metadata_cache->base_store());
}
namespace jb = tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_BINDER(
SpecJsonBinder,
jb::Sequence(
jb::Member(internal::DataCopyConcurrencyResource::id,
jb::Projection<&KvsDriverSpec::data_copy_concurrency>()),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&KvsDriverSpec::cache_pool>()),
jb::Member("metadata_cache_pool",
jb::Projection<&KvsDriverSpec::metadata_cache_pool>()),
jb::Projection<&KvsDriverSpec::store>(jb::KvStoreSpecAndPathJsonBinder),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->store.path);
return absl::OkStatus();
}),
jb::Projection<&KvsDriverSpec::staleness>(jb::Sequence(
jb::Member("recheck_cached_metadata",
jb::Projection(&StalenessBounds::metadata,
jb::DefaultValue([](auto* obj) {
obj->bounded_by_open_time = true;
}))),
jb::Member("recheck_cached_data",
jb::Projection(&StalenessBounds::data,
jb::DefaultInitializedValue())))),
internal::OpenModeSpecJsonBinder));
}
} | #include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvs_backed_chunk_driver::
ValidateResizeConstraints;
using ISpan = ::tensorstore::span<const Index>;
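// Positional arguments to ValidateResizeConstraints below, in order:
// current_domain, new_inclusive_min, new_exclusive_max,
// inclusive_min_constraint, exclusive_max_constraint, expand_only,
// shrink_only.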
TEST(ValidateResizeConstraintsTest, Success) {
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({4, 6}),
ISpan({0, 0}),
ISpan({4, kImplicit}),
false,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 3}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
true));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 5}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
true));
}
TEST(ValidateResizeConstraintsTest, Failure) {
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({5, kImplicit}),
false,
false),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Resize operation would also affect output dimension 0 "
"over the out-of-bounds interval \\[4, 5\\)"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({3, kImplicit}),
false,
false),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would also affect output dimension 0 over the "
"interval \\[3, 4\\) but `resize_tied_bounds` was not specified"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
true),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would expand output dimension 1 from "
"\\[0, 5\\) to \\[0, 6\\) but `shrink_only` was specified"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 4}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
false),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would shrink output dimension 1 from "
"\\[0, 5\\) to \\[0, 4\\) but `expand_only` was specified"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/kvs_backed_chunk_driver.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/kvs_backed_chunk_driver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7db001ef-4305-4c62-9979-270514aeb36e | cpp | google/tensorstore | chunk_encoding | tensorstore/driver/neuroglancer_precomputed/chunk_encoding.cc | tensorstore/driver/neuroglancer_precomputed/chunk_encoding_test.cc | #include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include "tensorstore/internal/data_type_endian_conversion.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/jpeg_writer.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/png_writer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_neuroglancer_precomputed {
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::JpegWriterOptions;
using ::tensorstore::internal_image::PngWriterOptions;
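// Decodes a little-endian "raw"-encoded chunk.  A chunk covering the full
// grid cell may be returned as a zero-copy view of the cord; partial (edge)
// chunks are copied into a zero-initialized array with the full chunk
// layout.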
Result<SharedArray<const void>> DecodeRawChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, absl::Cord buffer) {
const Index expected_bytes = ProductOfExtents(shape) * dtype.size();
if (expected_bytes != static_cast<Index>(buffer.size())) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected chunk length to be ", expected_bytes,
", but received ", buffer.size(), " bytes"));
}
auto flat_buffer = buffer.Flatten();
if (absl::c_equal(shape, chunk_layout.shape())) {
auto decoded_array = internal::TryViewCordAsArray(
buffer, 0, dtype, endian::little, chunk_layout);
if (decoded_array.valid()) return {std::in_place, decoded_array};
}
Array<const void, 4> source(
{static_cast<const void*>(flat_buffer.data()), dtype}, shape);
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
value_init, dtype),
chunk_layout);
ArrayView<void> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayoutView<>{shape, chunk_layout.byte_strides()});
internal::DecodeArray(source, endian::little, partial_decoded_array);
return full_decoded_array;
}
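// Common JPEG/PNG decode path: the decoded image's width*height must equal
// the number of spatial elements of the partial chunk and its component
// count must equal the channel dimension.  Single-channel full-size chunks
// can alias the decoded buffer; otherwise the data is copied (with the
// channel dimension transposed) into the chunk layout.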
template <typename ImageReader>
Result<SharedArray<const void>> DecodeImageChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
auto array = AllocateArray(
{partial_shape[1], partial_shape[2], partial_shape[3], partial_shape[0]},
c_order, default_init, dtype);
{
riegeli::CordReader<> cord_reader(&encoded_input);
ImageReader reader;
TENSORSTORE_RETURN_IF_ERROR(reader.Initialize(&cord_reader));
auto info = reader.GetImageInfo();
const Index num_elements = ProductOfExtents(partial_shape.subspan<1>());
size_t total_pixels;
if (internal::MulOverflow(static_cast<size_t>(info.width),
static_cast<size_t>(info.height),
&total_pixels) ||
num_elements == std::numeric_limits<Index>::max() ||
static_cast<Index>(total_pixels) != num_elements ||
static_cast<Index>(info.num_components) != partial_shape[0]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Image dimensions (", info.width, ", ", info.height, ", ",
info.num_components,
") are not compatible with expected chunk shape ", partial_shape));
}
TENSORSTORE_RETURN_IF_ERROR(reader.Decode(
tensorstore::span(reinterpret_cast<unsigned char*>(array.data()),
ImageRequiredBytes(info))));
if (!cord_reader.Close()) {
return cord_reader.status();
}
}
if (partial_shape[0] == 1 &&
absl::c_equal(partial_shape, chunk_layout.shape())) {
return SharedArray<const void>(array.element_pointer(), chunk_layout);
}
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
Array<void, 4> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayout<4>(
{partial_shape[1], partial_shape[2], partial_shape[3],
partial_shape[0]},
{chunk_layout.byte_strides()[1], chunk_layout.byte_strides()[2],
chunk_layout.byte_strides()[3], chunk_layout.byte_strides()[0]}));
CopyArray(array, partial_decoded_array);
return full_decoded_array;
}
Result<SharedArray<const void>> DecodeJpegChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::JpegReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
Result<SharedArray<const void>> DecodePngChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::PngReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
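// Decodes a neuroglancer compressed-segmentation chunk (uint32 or uint64)
// directly into the chunk layout.  Note that the block shape is passed to
// the decoder with its elements reversed relative to `block_size`.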
Result<SharedArray<const void>> DecodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, std::array<Index, 3> block_size,
absl::Cord buffer) {
auto flat_buffer = buffer.Flatten();
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
std::ptrdiff_t output_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::ptrdiff_t output_byte_strides[4] = {
chunk_layout.byte_strides()[0], chunk_layout.byte_strides()[1],
chunk_layout.byte_strides()[2], chunk_layout.byte_strides()[3]};
bool success = false;
switch (dtype.id()) {
case DataTypeId::uint32_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint32_t*>(full_decoded_array.data()));
break;
case DataTypeId::uint64_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint64_t*>(full_decoded_array.data()));
break;
default:
ABSL_UNREACHABLE();
}
if (!success) {
return absl::InvalidArgumentError(
"Corrupted Neuroglancer compressed segmentation");
}
return full_decoded_array;
}
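// Computes the shape of the chunk at `chunk_indices`, clamping each spatial
// extent to the scale's bounds so that chunks at the upper edge of the volume
// may be truncated.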
void GetChunkShape(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata, size_t scale_index,
span<const Index, 4> full_chunk_shape,
span<Index, 4> partial_chunk_shape) {
const auto& scale = metadata.scales[scale_index];
partial_chunk_shape[0] = full_chunk_shape[0];
for (int i = 0; i < 3; ++i) {
const Index full_size = full_chunk_shape[3 - i];
partial_chunk_shape[3 - i] = std::min(
scale.box.shape()[i] - chunk_indices[i] * full_size, full_size);
}
}
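// Decodes a chunk using the decoder that matches the encoding declared in the
// scale metadata.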
Result<SharedArray<const void>> DecodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
StridedLayoutView<4> chunk_layout,
absl::Cord buffer) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index, chunk_layout.shape(),
chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return DecodeRawChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::png:
return DecodePngChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::jpeg:
return DecodeJpegChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::compressed_segmentation:
return DecodeCompressedSegmentationChunk(
metadata.dtype, chunk_shape, chunk_layout,
scale_metadata.compressed_segmentation_block_size, std::move(buffer));
}
ABSL_UNREACHABLE();
}
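// Encodes a chunk as raw little-endian bytes in C order.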
absl::Cord EncodeRawChunk(DataType dtype, span<const Index, 4> shape,
const SharedArrayView<const void>& array) {
ArrayView<const void> partial_source(
array.element_pointer(),
StridedLayoutView<>(shape, array.byte_strides()));
internal::FlatCordBuilder buffer(ProductOfExtents(shape) * dtype.size());
Array<void, 4> encoded_array({static_cast<void*>(buffer.data()), dtype},
shape);
internal::EncodeArray(partial_source, encoded_array, endian::little);
return std::move(buffer).Build();
}
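// Encodes a chunk as a 2-D image of width `x` and height `z*y`.  The chunk is
// first copied to a contiguous channel-interleaved {z, y, x, c} array, which
// is the pixel order expected by the image writer.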
template <typename ImageWriter, typename Options>
Result<absl::Cord> EncodeImageChunk(Options options, DataType dtype,
span<const Index, 4> shape,
ArrayView<const void> array) {
Array<const void, 4> partial_source(
array.element_pointer(),
StridedLayout<4>({shape[1], shape[2], shape[3], shape[0]},
{array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3], array.byte_strides()[0]}));
auto contiguous_array = MakeCopy(partial_source, c_order);
absl::Cord buffer;
{
ImageWriter writer;
riegeli::CordWriter<> cord_writer(&buffer);
TENSORSTORE_RETURN_IF_ERROR(writer.Initialize(&cord_writer, options));
ImageInfo info{static_cast<int32_t>(shape[3]),
static_cast<int32_t>(shape[1] * shape[2]),
static_cast<int32_t>(shape[0]),
dtype};
TENSORSTORE_RETURN_IF_ERROR(writer.Encode(
info, tensorstore::span(reinterpret_cast<const unsigned char*>(
contiguous_array.data()),
contiguous_array.num_elements() *
contiguous_array.dtype().size())));
TENSORSTORE_RETURN_IF_ERROR(writer.Done());
}
return buffer;
}
Result<absl::Cord> EncodeJpegChunk(DataType dtype, int quality,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::JpegWriterOptions options;
options.quality = quality;
return EncodeImageChunk<internal_image::JpegWriter>(options, dtype, shape,
array);
}
Result<absl::Cord> EncodePngChunk(DataType dtype, int compression_level,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::PngWriterOptions options;
options.compression_level = compression_level;
return EncodeImageChunk<internal_image::PngWriter>(options, dtype, shape,
array);
}
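// Encodes a chunk in the neuroglancer compressed_segmentation format; only
// uint32 and uint64 element types are supported.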
Result<absl::Cord> EncodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape, ArrayView<const void> array,
std::array<Index, 3> block_size) {
std::ptrdiff_t input_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::string out;
std::ptrdiff_t input_byte_strides[4] = {
array.byte_strides()[0], array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3]};
switch (dtype.id()) {
case DataTypeId::uint32_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint32_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
case DataTypeId::uint64_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint64_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
default:
ABSL_UNREACHABLE();
}
return absl::Cord(std::move(out));
}
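// Encodes a (possibly truncated) chunk using the encoder that matches the
// encoding declared in the scale metadata.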
Result<absl::Cord> EncodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
const SharedArrayView<const void>& array) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> partial_chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index,
span<const Index, 4>(array.shape().data(), 4),
partial_chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return EncodeRawChunk(metadata.dtype, partial_chunk_shape, array);
case ScaleMetadata::Encoding::jpeg:
return EncodeJpegChunk(metadata.dtype, scale_metadata.jpeg_quality,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::png:
return EncodePngChunk(metadata.dtype, scale_metadata.png_level,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::compressed_segmentation:
return EncodeCompressedSegmentationChunk(
metadata.dtype, partial_chunk_shape, array,
scale_metadata.compressed_segmentation_block_size);
}
ABSL_UNREACHABLE();
}
}
} | #include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_neuroglancer_precomputed::DecodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::EncodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::MultiscaleMetadata;
struct P {
::nlohmann::json metadata_json;
tensorstore::DataType dtype;
bool compare = true;
bool truncate = true;
};
class ChunkEncodingTest : public testing::TestWithParam<P> {
public:
template <typename T>
tensorstore::SharedArray<void> AllocateArrayImpl(Index num_channels) {
auto array = tensorstore::AllocateArray<T>({num_channels, 5, 4, 3});
for (Index i = 0, n = array.num_elements(); i < n; ++i) {
array.data()[i] = static_cast<T>(i);
}
return array;
}
tensorstore::SharedArray<void> GetArrayForDType(tensorstore::DataTypeId id,
Index num_channels) {
switch (id) {
case tensorstore::DataTypeId::uint8_t:
return AllocateArrayImpl<uint8_t>(num_channels);
case tensorstore::DataTypeId::uint16_t:
return AllocateArrayImpl<uint16_t>(num_channels);
case tensorstore::DataTypeId::uint32_t:
return AllocateArrayImpl<uint32_t>(num_channels);
case tensorstore::DataTypeId::uint64_t:
return AllocateArrayImpl<uint64_t>(num_channels);
default:
ABSL_UNREACHABLE();
}
}
};
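// Round-trip test: encodes a chunk, decodes it, and (for lossless encodings)
// verifies the result matches; also verifies that decoding a truncated
// encoding fails with a DATA_LOSS or INVALID_ARGUMENT error.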
TEST_P(ChunkEncodingTest, Roundtrip) {
auto metadata_json = GetParam().metadata_json;
auto dtype = GetParam().dtype;
metadata_json["data_type"] = dtype.name();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
MultiscaleMetadata::FromJson(metadata_json));
auto array = GetArrayForDType(dtype.id(), metadata.num_channels);
std::vector<Index> chunk_indices{0, 0, 0};
const size_t scale_index = 0;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord out, EncodeChunk(chunk_indices, metadata, scale_index, array));
tensorstore::StridedLayout chunk_layout(tensorstore::c_order, dtype.size(),
{metadata.num_channels, 5, 4, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decode_result,
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout, out));
if (!out.empty() && GetParam().truncate) {
auto corrupt = out.Subcord(0, out.size() - 1);
EXPECT_THAT(
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout,
corrupt),
testing::AnyOf(MatchesStatus(absl::StatusCode::kDataLoss),
MatchesStatus(absl::StatusCode::kInvalidArgument)));
}
if (GetParam().compare) {
EXPECT_THAT(decode_result, array);
}
}
std::vector<P> GenerateParams() {
std::vector<P> result;
for (const int num_channels : {1, 2, 3, 4}) {
P param;
param.metadata_json =
::nlohmann::json{{"@type", "neuroglancer_multiscale_volume"},
{"num_channels", num_channels},
{"scales",
{{{"chunk_sizes", {{3, 4, 5}}},
{"encoding", "raw"},
{"key", "k"},
{"resolution", {5, 6, 7}},
{"size", {10, 11, 12}}}}},
{"type", "image"}};
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
param.truncate = false;
if (num_channels >= 1 && num_channels <= 4) {
param.metadata_json["scales"][0]["encoding"] = "png";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
if (num_channels == 1) {
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
}
}
param.truncate = true;
param.compare = false;
if (num_channels == 1 || num_channels == 3) {
param.metadata_json["scales"][0]["encoding"] = "jpeg";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
}
param.compare = true;
param.metadata_json["scales"][0]["encoding"] = "compressed_segmentation";
param.metadata_json["scales"][0]["compressed_segmentation_block_size"] = {
2, 3, 4};
param.dtype = tensorstore::dtype_v<uint32_t>;
result.push_back(param);
param.dtype = tensorstore::dtype_v<uint64_t>;
result.push_back(param);
}
return result;
}
INSTANTIATE_TEST_SUITE_P(
All, ChunkEncodingTest, testing::ValuesIn(GenerateParams()),
[](const testing::TestParamInfo<P>& info) {
const auto& p = info.param;
auto encoding =
p.metadata_json["scales"][0]["encoding"].get<std::string>();
return tensorstore::StrCat(encoding, "_", p.metadata_json["num_channels"],
"_", p.dtype.name());
});
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/neuroglancer_precomputed/chunk_encoding.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/neuroglancer_precomputed/chunk_encoding_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
923ae6f7-74bb-4941-b236-d1ca0cc1e804 | cpp | google/tensorstore | metadata | tensorstore/internal/metrics/metadata.cc | tensorstore/internal/metrics/metadata_test.cc | #include "tensorstore/internal/metrics/metadata.h"
#include <cstddef>
#include <string_view>
#include "absl/base/optimization.h"
#include "absl/strings/ascii.h"
namespace tensorstore {
namespace internal_metrics {
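// A valid metric name is a '/'-delimited path: it begins with '/', does not
// end with '/', its first component starts with a letter, components are
// non-empty and bounded in length, and all characters are in [A-Za-z0-9_].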
bool IsValidMetricName(std::string_view name) {
if (name.size() < 2) return false;
if (name[0] != '/') return false;
if (name[name.size() - 1] == '/') return false;
if (!absl::ascii_isalpha(name[1])) return false;
size_t last_slash = 0;
for (size_t i = 1; i < name.size(); i++) {
const auto ch = name[i];
if (ch == '/') {
if (i - last_slash == 1) return false;
if (i - last_slash > 63) return false;
last_slash = i;
} else if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
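// A valid metric label is non-empty, begins with a letter, and contains only
// characters in [A-Za-z0-9_].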
bool IsValidMetricLabel(std::string_view name) {
if (name.empty()) return false;
if (!absl::ascii_isalpha(name[0])) return false;
for (auto ch : name) {
if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
std::string_view UnitsToString(Units units) {
switch (units) {
case Units::kUnknown:
return {};
case Units::kSeconds:
return "seconds";
case Units::kMilliseconds:
return "milliseconds";
case Units::kMicroseconds:
return "microseconds";
case Units::kNanoseconds:
return "nanoseconds";
case Units::kBits:
return "bits";
case Units::kBytes:
return "bytes";
case Units::kKilobytes:
return "kilobytes";
case Units::kMegabytes:
return "megabytes";
}
ABSL_UNREACHABLE();
}
}
} | #include "tensorstore/internal/metrics/metadata.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_metrics::IsValidMetricLabel;
using ::tensorstore::internal_metrics::IsValidMetricName;
using ::tensorstore::internal_metrics::Units;
using ::tensorstore::internal_metrics::UnitsToString;
TEST(MetadataTest, IsValidMetricName) {
EXPECT_FALSE(IsValidMetricName(""));
EXPECT_FALSE(IsValidMetricName("/"));
EXPECT_FALSE(IsValidMetricName("
EXPECT_FALSE(IsValidMetricName("/foo/"));
EXPECT_FALSE(IsValidMetricName("/foo
EXPECT_FALSE(IsValidMetricName("/_foo"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo.bar"));
EXPECT_FALSE(IsValidMetricName("foo_1"));
EXPECT_TRUE(IsValidMetricName("/foo/1_bar/Baz"));
}
TEST(MetadataTest, IsValidMetricLabel) {
EXPECT_FALSE(IsValidMetricLabel(""));
EXPECT_FALSE(IsValidMetricLabel("/"));
EXPECT_FALSE(IsValidMetricLabel("1_bar"));
EXPECT_FALSE(IsValidMetricLabel("_bar"));
EXPECT_FALSE(IsValidMetricLabel("foo/bar"));
EXPECT_FALSE(IsValidMetricLabel("foo-bar"));
EXPECT_FALSE(IsValidMetricLabel("foo.bar"));
EXPECT_TRUE(IsValidMetricLabel("a"));
EXPECT_TRUE(IsValidMetricLabel("foB_1"));
}
TEST(MetadataTest, UnitsToString) {
EXPECT_THAT(UnitsToString(Units::kUnknown), ::testing::IsEmpty());
EXPECT_THAT(UnitsToString(Units::kSeconds), ::testing::Eq("seconds"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/metadata.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/metadata_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ea731c06-0094-4eaa-a57c-795a590e84d8 | cpp | google/tensorstore | chunk_cache | tensorstore/internal/cache/chunk_cache.cc | tensorstore/internal/cache/chunk_cache_test.cc | #include "tensorstore/internal/cache/chunk_cache.h"
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <memory>
#include <mutex>
#include <string_view>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/chunk_receiver_utils.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/lock_collection.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/rank.h"
#include "tensorstore/read_write_options.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal_metrics::MetricMetadata;
#ifndef TENSORSTORE_INTERNAL_CHUNK_CACHE_DEBUG
#define TENSORSTORE_INTERNAL_CHUNK_CACHE_DEBUG 0
#endif
namespace tensorstore {
namespace internal {
auto& num_writes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/chunk_cache/writes",
MetricMetadata("Number of writes to ChunkCache."));
auto& num_reads = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/chunk_cache/reads",
MetricMetadata("Number of reads from ChunkCache."));
namespace {
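// Returns whether every component of the chunk has been fully overwritten by
// this transaction node, in which case writeback does not depend on the
// previously stored value.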
bool IsFullyOverwritten(ChunkCache::TransactionNode& node) {
auto& entry = GetOwningEntry(node);
const auto& grid = GetOwningCache(entry).grid();
const auto& component_specs = grid.components;
const tensorstore::span<const Index> cell_indices = entry.cell_indices();
for (size_t component_index = 0, num_components = component_specs.size();
component_index != num_components; ++component_index) {
if (!node.components()[component_index].write_state.IsFullyOverwritten(
component_specs[component_index].array_spec,
grid.GetCellDomain(component_index, cell_indices))) {
return false;
}
}
return true;
}
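// ReadChunk implementation for non-transactional reads: no locking is
// required, and BeginRead returns an NDIterable over the entry's cached read
// state.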
struct ReadChunkImpl {
size_t component_index;
PinnedCacheEntry<ChunkCache> entry;
absl::Status operator()(internal::LockCollection& lock_collection) const {
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& grid = GetOwningCache(*entry).grid();
auto domain = grid.GetCellDomain(component_index, entry->cell_indices());
SharedArray<const void, dynamic_rank(kMaxRank)> read_array{
ChunkCache::GetReadComponent(
AsyncCache::ReadLock<ChunkCache::ReadData>(*entry).data(),
component_index)};
return grid.components[component_index].array_spec.GetReadNDIterable(
std::move(read_array), domain, std::move(chunk_transform), arena);
}
};
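// ReadChunk implementation for transactional reads: the transaction node's
// writer lock is registered for the duration of the read, and under
// `repeatable_read` the observed generation is recorded for later validation.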
struct ReadChunkTransactionImpl {
size_t component_index;
OpenTransactionNodePtr<ChunkCache::TransactionNode> node;
absl::Status operator()(internal::LockCollection& lock_collection) const {
constexpr auto lock_chunk = [](void* data, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS -> bool {
auto& node = *static_cast<ChunkCache::TransactionNode*>(data);
if (lock) {
node.WriterLock();
} else {
node.WriterUnlock();
}
return true;
};
lock_collection.Register(node.get(), +lock_chunk, true);
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto& component = node->components()[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
SharedArray<const void, dynamic_rank(kMaxRank)> read_array;
StorageGeneration read_generation;
{
AsyncCache::ReadLock<ChunkCache::ReadData> read_lock(*node);
read_array =
ChunkCache::GetReadComponent(read_lock.data(), component_index);
read_generation = read_lock.stamp().generation;
if (!node->IsUnconditional() &&
(node->transaction()->mode() & repeatable_read)) {
TENSORSTORE_RETURN_IF_ERROR(
node->RequireRepeatableRead(read_generation));
}
}
return component.GetReadNDIterable(component_spec.array_spec, domain,
std::move(read_array), read_generation,
std::move(chunk_transform), arena);
}
};
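// WriteChunk implementation: writes are buffered in the transaction node; a
// chunk that becomes fully overwritten is marked unconditional so that
// writeback can proceed without reading the existing stored value.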
struct WriteChunkImpl {
size_t component_index;
OpenTransactionNodePtr<ChunkCache::TransactionNode> node;
absl::Status operator()(internal::LockCollection& lock_collection) {
constexpr auto lock_chunk = [](void* data, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS -> bool {
auto& node = *static_cast<ChunkCache::TransactionNode*>(data);
if (lock) {
return node.try_lock();
} else {
node.WriterUnlock();
return true;
}
};
if (node->IsRevoked()) {
OpenTransactionPtr transaction(node->transaction());
TENSORSTORE_ASSIGN_OR_RETURN(
node, GetTransactionNode(GetOwningEntry(*node), transaction));
}
lock_collection.Register(node.get(), +lock_chunk, false);
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(WriteChunk::BeginWrite,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
node->MarkSizeUpdated();
return node->components()[component_index].BeginWrite(
component_spec.array_spec, domain, std::move(chunk_transform), arena);
}
WriteChunk::EndWriteResult operator()(WriteChunk::EndWrite,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
node->components()[component_index].EndWrite(
component_spec.array_spec, domain, chunk_transform, success, arena);
node->is_modified = true;
if (IsFullyOverwritten(*node)) {
node->SetUnconditional();
}
return {node->OnModified(), node->transaction()->future()};
}
bool operator()(WriteChunk::WriteArray, IndexTransformView<> chunk_transform,
WriteChunk::GetWriteSourceArrayFunction get_source_array,
Arena* arena,
WriteChunk::EndWriteResult& end_write_result) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
auto status = node->components()[component_index].WriteArray(
component_spec.array_spec, domain, chunk_transform,
[&]() -> Result<std::pair<TransformedSharedArray<const void>,
WriteArraySourceCapabilities>> {
TENSORSTORE_ASSIGN_OR_RETURN(auto info, get_source_array());
auto source_restriction = std::get<1>(info);
WriteArraySourceCapabilities source_capabilities;
switch (source_restriction) {
case cannot_reference_source_data:
source_capabilities = WriteArraySourceCapabilities::kCannotRetain;
break;
case can_reference_source_data_indefinitely:
source_capabilities = WriteArraySourceCapabilities::
kImmutableAndCanRetainIndefinitely;
break;
}
return {std::in_place, std::move(std::get<0>(info)),
source_capabilities};
});
if (!status.ok()) {
if (absl::IsCancelled(status)) return false;
end_write_result = {status};
return true;
}
node->is_modified = true;
node->SetUnconditional();
end_write_result = {node->OnModified(), node->transaction()->future()};
return true;
}
};
}
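// Partitions the read request over the grid cells intersecting the request's
// transform and emits one ReadChunk per cell; each chunk's data is provided
// by the cache entry (or transaction node) for that cell.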
void ChunkCache::Read(ReadRequest request, ReadChunkReceiver receiver) {
assert(request.component_index >= 0 &&
request.component_index < grid().components.size());
const auto& component_spec = grid().components[request.component_index];
using ReadOperationState = ChunkOperationState<ReadChunk>;
assert(component_spec.chunked_to_cell_dimensions.size() ==
grid().chunk_shape.size());
auto state = MakeIntrusivePtr<ReadOperationState>(std::move(receiver));
internal_grid_partition::RegularGridRef regular_grid{grid().chunk_shape};
auto status = PartitionIndexTransformOverGrid(
component_spec.chunked_to_cell_dimensions, regular_grid,
request.transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
if (state->cancelled()) {
return absl::CancelledError("");
}
num_reads.Increment();
TENSORSTORE_ASSIGN_OR_RETURN(
auto cell_to_source,
ComposeTransforms(request.transform, cell_transform));
auto entry = GetEntryForGridCell(*this, grid_cell_indices);
ReadChunk chunk;
chunk.transform = std::move(cell_to_source);
Future<const void> read_future;
const auto get_cache_read_request = [&] {
AsyncCache::AsyncCacheReadRequest cache_request;
cache_request.staleness_bound = request.staleness_bound;
cache_request.batch = request.batch;
return cache_request;
};
if (request.transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetTransactionNode(*entry, request.transaction));
read_future = node->IsUnconditional()
? MakeReadyFuture()
: node->Read(get_cache_read_request());
chunk.impl = ReadChunkTransactionImpl{request.component_index,
std::move(node)};
} else {
read_future = entry->Read(get_cache_read_request());
chunk.impl = ReadChunkImpl{request.component_index, std::move(entry)};
}
LinkValue(
[state, chunk = std::move(chunk),
cell_transform = IndexTransform<>(cell_transform)](
Promise<void> promise, ReadyFuture<const void> future) mutable {
execution::set_value(state->shared_receiver->receiver,
std::move(chunk), std::move(cell_transform));
},
state->promise, std::move(read_future));
return absl::OkStatus();
});
if (!status.ok()) {
state->SetError(std::move(status));
}
}
void ChunkCache::Write(WriteRequest request, WriteChunkReceiver receiver) {
assert(request.component_index >= 0 &&
request.component_index < grid().components.size());
const auto& component_spec = grid().components[request.component_index];
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] { cancelled = true; });
internal_grid_partition::RegularGridRef regular_grid{grid().chunk_shape};
absl::Status status = PartitionIndexTransformOverGrid(
component_spec.chunked_to_cell_dimensions, regular_grid,
request.transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
if (cancelled) return absl::CancelledError("");
num_writes.Increment();
TENSORSTORE_ASSIGN_OR_RETURN(
auto cell_to_dest,
ComposeTransforms(request.transform, cell_transform));
ABSL_LOG_IF(INFO, TENSORSTORE_INTERNAL_CHUNK_CACHE_DEBUG)
<< "grid_cell_indices=" << grid_cell_indices
<< ", request.transform=" << request.transform
<< ", cell_transform=" << cell_transform
<< ", cell_to_dest=" << cell_to_dest;
auto entry = GetEntryForGridCell(*this, grid_cell_indices);
auto transaction_copy = request.transaction;
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetTransactionNode(*entry, transaction_copy));
execution::set_value(
receiver,
WriteChunk{WriteChunkImpl{request.component_index, std::move(node)},
std::move(cell_to_dest)},
IndexTransform<>(cell_transform));
return absl::OkStatus();
});
if (!status.ok()) {
execution::set_error(receiver, status);
} else {
execution::set_done(receiver);
}
execution::set_stopping(receiver);
}
Future<const void> ChunkCache::DeleteCell(
tensorstore::span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction) {
return GetEntryForGridCell(*this, grid_cell_indices)->Delete(transaction);
}
absl::Status ChunkCache::TransactionNode::Delete() {
UniqueWriterLock lock(*this);
this->MarkSizeUpdated();
this->is_modified = true;
auto& entry = GetOwningEntry(*this);
const tensorstore::span<const Index> cell_indices = entry.cell_indices();
const auto& grid = GetOwningCache(entry).grid();
for (Index component_index = 0, num_components = grid.components.size();
component_index != num_components; ++component_index) {
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, cell_indices);
components()[component_index].write_state.WriteFillValue(
component_spec.array_spec, domain);
}
SetUnconditional();
return OnModified();
}
Future<const void> ChunkCache::Entry::Delete(OpenTransactionPtr transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(auto node,
GetTransactionNode(*this, transaction));
TENSORSTORE_RETURN_IF_ERROR(node->Delete());
return node->transaction()->future();
}
size_t ChunkCache::TransactionNode::ComputeWriteStateSizeInBytes() {
size_t total = 0;
const auto component_specs = this->component_specs();
for (size_t component_index = 0;
component_index < static_cast<size_t>(component_specs.size());
++component_index) {
auto& component_spec = component_specs[component_index];
total +=
this->components()[component_index].write_state.EstimateSizeInBytes(
component_spec.array_spec, component_spec.chunk_shape);
}
return total;
}
size_t ChunkCache::Entry::ComputeReadDataSizeInBytes(const void* read_data) {
const ReadData* components = static_cast<const ReadData*>(read_data);
size_t total = 0;
auto component_specs = this->component_specs();
for (size_t component_index = 0;
component_index < static_cast<size_t>(component_specs.size());
++component_index) {
auto& component_spec = component_specs[component_index];
total += component_spec.array_spec.EstimateReadStateSizeInBytes(
components[component_index].valid(), component_spec.chunk_shape);
}
return total;
}
ChunkCache::WritebackSnapshot::WritebackSnapshot(
TransactionNode& node, AsyncCache::ReadView<ReadData> read_state) {
auto& entry = GetOwningEntry(node);
auto& grid = GetOwningCache(entry).grid();
const tensorstore::span<const Index> cell_indices = entry.cell_indices();
for (size_t component_i = 0; component_i < grid.components.size();
++component_i) {
const auto& component_spec = grid.components[component_i];
auto& component = node.components()[component_i];
auto domain = grid.GetCellDomain(component_i, cell_indices);
auto component_snapshot = component.GetArrayForWriteback(
component_spec.array_spec, domain,
GetReadComponent(read_state.data(), component_i),
read_state.stamp().generation);
if (component_snapshot.must_store) {
if (!new_read_data_) {
new_read_data_ = internal::make_shared_for_overwrite<ReadData[]>(
grid.components.size());
}
new_read_data_.get()[component_i] = std::move(component_snapshot.array);
}
}
}
ChunkCache::TransactionNode::TransactionNode(Entry& entry)
: AsyncCache::TransactionNode(entry) {
const auto& component_specs = GetOwningCache(entry).grid().components;
components_.reserve(component_specs.size());
for (size_t i = 0; i < component_specs.size(); ++i) {
components_.emplace_back(component_specs[i].rank());
}
}
absl::Status ChunkCache::TransactionNode::OnModified() {
return absl::OkStatus();
}
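// Produces the writeback state for this transaction node.  Unconditional
// nodes, and unmodified nodes (unless kSpecifyUnchanged is requested), skip
// reading the existing state; otherwise the current read state is fetched
// and, if modified, merged with the buffered writes via WritebackSnapshot.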
void ChunkCache::TransactionNode::DoApply(ApplyOptions options,
ApplyReceiver receiver) {
if (options.apply_mode == ApplyOptions::kValidateOnly) {
execution::set_value(
receiver, ReadState{{}, TimestampedStorageGeneration::Unconditional()});
return;
}
auto continuation = WithExecutor(
GetOwningCache(*this).executor(),
[this, receiver = std::move(receiver),
specify_unchanged =
options.apply_mode == ApplyOptions::kSpecifyUnchanged](
tensorstore::ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
return execution::set_error(receiver, future.result().status());
}
AsyncCache::ReadState read_state;
if (this->IsUnconditional() ||
(!this->is_modified && !specify_unchanged)) {
read_state.stamp = TimestampedStorageGeneration::Unconditional();
} else {
read_state = AsyncCache::ReadLock<void>(*this).read_state();
}
if (is_modified) {
UniqueWriterLock<AsyncCache::TransactionNode> lock(*this);
WritebackSnapshot snapshot(
*this, AsyncCache::ReadView<ReadData>(read_state));
read_state.data = std::move(snapshot.new_read_data());
read_state.stamp.generation.MarkDirty();
}
execution::set_value(receiver, std::move(read_state));
});
if (this->IsUnconditional() ||
(!this->is_modified &&
options.apply_mode != ApplyOptions::kSpecifyUnchanged)) {
continuation(MakeReadyFuture());
} else {
this->Read({options.staleness_bound})
.ExecuteWhenReady(std::move(continuation));
}
}
void ChunkCache::TransactionNode::InvalidateReadState() {
AsyncCache::TransactionNode::InvalidateReadState();
for (auto& component : components()) {
component.InvalidateReadState();
}
}
}
} | #include "tensorstore/internal/cache/chunk_cache.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/inlined_vector.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/chunk_cache_driver.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/progress.h"
#include "tensorstore/read_write_options.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::ArrayView;
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Executor;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::IndexTransform;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeCopy;
using ::tensorstore::MatchesStatus;
using ::tensorstore::no_transaction;
using ::tensorstore::ReadProgressFunction;
using ::tensorstore::Result;
using ::tensorstore::SharedArray;
using ::tensorstore::span;
using ::tensorstore::StalenessBound;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TensorStore;
using ::tensorstore::Transaction;
using ::tensorstore::WriteProgressFunction;
using ::tensorstore::internal::AsyncCache;
using ::tensorstore::internal::AsyncWriteArray;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::CachePtr;
using ::tensorstore::internal::ChunkCache;
using ::tensorstore::internal::ChunkGridSpecification;
using ::tensorstore::internal::ConcreteChunkCache;
using ::tensorstore::internal::ElementCopyFunction;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::GetEntryForGridCell;
using ::tensorstore::internal::MakeReadWritePtr;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::ReadWritePtr;
using ::tensorstore::internal::SimpleElementwiseFunction;
using ::testing::ElementsAre;
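// Decodes the raw chunk representation used by TestCache: each component
// array is stored consecutively in native-endian C order.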
Result<std::shared_ptr<const ChunkCache::ReadData>> DecodeRaw(
const ChunkGridSpecification& grid, const absl::Cord* value) {
const auto& component_specs = grid.components;
std::shared_ptr<ChunkCache::ReadData> read_data;
if (value) {
read_data = tensorstore::internal::make_shared_for_overwrite<
ChunkCache::ReadData[]>(component_specs.size());
riegeli::CordReader<const absl::Cord*> reader{value};
for (size_t component_i = 0; component_i < component_specs.size();
++component_i) {
const auto& spec = component_specs[component_i];
TENSORSTORE_ASSIGN_OR_RETURN(
read_data.get()[component_i],
tensorstore::internal::DecodeArrayEndian(
reader, spec.dtype(), spec.shape(), tensorstore::endian::native,
tensorstore::c_order));
}
if (!reader.VerifyEndAndClose()) return reader.status();
}
return std::static_pointer_cast<ChunkCache::ReadData>(std::move(read_data));
}
template <typename ComponentArrays = std::vector<SharedArray<const void>>>
absl::Cord EncodeRaw(const ChunkGridSpecification& grid,
const ComponentArrays& component_arrays) {
absl::Cord value;
riegeli::CordWriter<absl::Cord*> writer{&value};
const auto& component_specs = grid.components;
for (size_t component_i = 0; component_i < component_specs.size();
++component_i) {
const auto& spec = component_specs[component_i];
auto& array = component_arrays[component_i];
ABSL_CHECK(tensorstore::internal::RangesEqual(array.shape(), spec.shape()));
ABSL_CHECK(array.dtype() == spec.dtype());
ABSL_CHECK(tensorstore::internal::EncodeArrayEndian(
array, tensorstore::endian::native, tensorstore::c_order, writer));
}
ABSL_CHECK(writer.Close());
return value;
}
std::string EncodeKey(span<const Index> indices) {
return absl::StrJoin(indices, ",");
}
class TestCache
: public tensorstore::internal::KvsBackedCache<TestCache,
ConcreteChunkCache> {
using Base =
tensorstore::internal::KvsBackedCache<TestCache, ConcreteChunkCache>;
public:
using Base::Base;
class Entry : public Base::Entry {
public:
using OwningCache = TestCache;
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()([this, value = std::move(value),
receiver =
std::move(receiver)]() mutable {
TENSORSTORE_ASSIGN_OR_RETURN(
auto read_data,
DecodeRaw(GetOwningCache(*this).grid(), value ? &*value : nullptr),
tensorstore::execution::set_error(receiver, _));
tensorstore::execution::set_value(receiver, std::move(read_data));
});
}
void DoEncode(std::shared_ptr<const ReadData> data,
EncodeReceiver receiver) override {
std::optional<absl::Cord> encoded;
if (data) {
encoded = EncodeRaw(GetOwningCache(*this).grid(), data.get());
}
tensorstore::execution::set_value(receiver, std::move(encoded));
}
std::string GetKeyValueStoreKey() override {
return EncodeKey(this->cell_indices());
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
};
class TestDriver : public tensorstore::internal::ChunkCacheDriver {
public:
using ::tensorstore::internal::ChunkCacheDriver::ChunkCacheDriver;
void GarbageCollectionVisit(
tensorstore::garbage_collection::GarbageCollectionVisitor& visitor)
const final {
}
};
template <typename T>
ElementCopyFunction GetCopyFunction() {
[[maybe_unused]] const auto copy_func =
[](const T* source, T* dest, absl::Status* status) { *dest = *source; };
return SimpleElementwiseFunction<decltype(copy_func), const T, T>();
}
TEST(ChunkGridSpecificationTest, Basic) {
ChunkGridSpecification grid({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{SharedArray<const void>(MakeArray<int>({1, 2})),
Box<>(1)},
{2}}});
EXPECT_EQ(1, grid.components[0].rank());
EXPECT_EQ(1, grid.components[0].chunked_to_cell_dimensions.size());
EXPECT_EQ(1, grid.chunk_shape.size());
absl::InlinedVector<Index, 1> origin;
origin.resize(grid.components[0].rank());
grid.GetComponentOrigin(0, span<const Index>({0}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0));
grid.GetComponentOrigin(0, span<const Index>({1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(2));
}
TEST(ChunkGridSpecificationTest, MoreComplicated) {
std::vector<Index> shape = {1, 2, 3, 4};
SharedArray<const void> fill_value(
tensorstore::internal::AllocateAndConstructSharedElements(
1, tensorstore::value_init, tensorstore::dtype_v<int>),
tensorstore::StridedLayout<>(
shape, tensorstore::GetConstantVector<Index, 0, 4>()));
ChunkGridSpecification grid({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{fill_value, Box<>(shape)},
shape,
{3, 2, 1}}});
EXPECT_EQ(3, grid.chunk_shape.size());
EXPECT_THAT(grid.chunk_shape, testing::ElementsAre(4, 3, 2));
EXPECT_EQ(4, grid.components[0].array_spec.overall_fill_value.rank());
EXPECT_EQ(4, grid.components[0].rank());
EXPECT_EQ(3, grid.components[0].chunked_to_cell_dimensions.size());
EXPECT_THAT(grid.components[0].chunked_to_cell_dimensions,
testing::ElementsAre(3, 2, 1));
absl::InlinedVector<Index, 4> origin;
origin.resize(grid.components[0].rank());
grid.GetComponentOrigin(0, span<const Index>({0, 0, 0}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 0, 0, 0));
grid.GetComponentOrigin(0, span<const Index>({1, 1, 1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 2, 3, 4));
grid.GetComponentOrigin(0, span<const Index>({3, 2, 1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 2, 6, 12));
}
std::vector<Index> ParseKey(std::string_view key) {
std::vector<Index> result;
for (auto s : absl::StrSplit(key, ',')) {
Index i = 0;
ABSL_CHECK(absl::SimpleAtoi(s, &i));
result.push_back(i);
}
return result;
}
ReadWritePtr<TestDriver> MakeDriver(CachePtr<ChunkCache> cache,
size_t component_index = 0,
StalenessBound data_staleness = {}) {
return MakeReadWritePtr<TestDriver>(
tensorstore::ReadWriteMode::read_write,
TestDriver::Initializer{std::move(cache), component_index,
data_staleness});
}
class ChunkCacheTest : public ::testing::Test {
public:
Executor thread_pool = tensorstore::internal::DetachedThreadPool(1);
std::optional<ChunkGridSpecification> grid;
kvstore::DriverPtr memory_store = tensorstore::GetMemoryKeyValueStore();
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
std::vector<ChunkCache::ReadData> GetChunk(
const std::vector<Index>& indices) {
auto read_result = memory_store->Read(EncodeKey(indices)).value();
const size_t num_components = grid->components.size();
std::vector<ChunkCache::ReadData> components(num_components);
if (auto read_data =
DecodeRaw(*grid,
read_result.has_value() ? &read_result.value : nullptr)
.value()) {
for (size_t i = 0; i < num_components; ++i) {
components[i] = read_data.get()[i];
}
}
return components;
}
bool HasChunk(const std::vector<Index>& indices) {
auto read_result = memory_store->Read(EncodeKey(indices)).value();
return read_result.has_value();
}
void SetChunk(
const std::vector<Index>& indices,
std::vector<tensorstore::SharedArrayView<const void>> components) {
TENSORSTORE_CHECK_OK(
memory_store->Write(EncodeKey(indices), EncodeRaw(*grid, components)));
}
CachePtr<ChunkCache> MakeChunkCache(
std::string_view cache_identifier = {},
std::optional<CachePool::StrongPtr> pool = {}) {
if (!pool) {
pool = CachePool::Make(CachePool::Limits{10000000});
}
return GetCache<TestCache>(pool->get(), cache_identifier, [&] {
return std::make_unique<TestCache>(mock_store, *grid, thread_pool);
});
}
TensorStore<> GetTensorStore(CachePtr<ChunkCache> cache = {},
StalenessBound data_staleness = {},
size_t component_index = 0,
Transaction transaction = no_transaction) {
if (!cache) cache = MakeChunkCache();
return tensorstore::internal::TensorStoreAccess::Construct<TensorStore<>>(
tensorstore::internal::Driver::Handle{
MakeDriver(cache, component_index, data_staleness),
tensorstore::IdentityTransform(
grid->components[component_index].rank()),
transaction});
}
};
template <typename T>
tensorstore::SharedOffsetArray<T> MakeSequentialArray(BoxView<> domain) {
auto array = tensorstore::AllocateArray<T>(domain);
T value = T{};
IterateOverArrays(
[&](T* ptr) {
*ptr = value;
++value;
},
tensorstore::c_order, array);
return array;
}
ChunkGridSpecification GetSimple1DGrid() {
return ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{MakeSequentialArray<int>(BoxView<>{{0}, {10}}),
Box<>(1)},
{2}}});
}
TEST_F(ChunkCacheTest, ReadSingleComponentOneDimensionalFill) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
}
TEST_F(ChunkCacheTest, CancelRead) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
mock_store->forward_to = memory_store;
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
}
}
struct CancelWriteReceiver {
friend void set_starting(CancelWriteReceiver& receiver,
tensorstore::AnyCancelReceiver cancel) {
receiver.cancel = std::move(cancel);
}
friend void set_value(CancelWriteReceiver& receiver,
tensorstore::internal::WriteChunk chunk,
tensorstore::IndexTransform<> cell_transform) {
EXPECT_FALSE(receiver.set_value_called);
receiver.set_value_called = true;
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_single_input_dimension(0, 3, 1, 0)
.Finalize()
.value(),
chunk.transform);
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
cell_transform);
receiver.cancel();
}
friend void set_done(CancelWriteReceiver& receiver) {}
friend void set_error(CancelWriteReceiver& receiver, absl::Status status) {}
friend void set_stopping(CancelWriteReceiver& receiver) {
receiver.cancel = nullptr;
}
bool set_value_called = false;
tensorstore::AnyCancelReceiver cancel;
};
TEST_F(ChunkCacheTest, CancelWrite) {
grid = GetSimple1DGrid();
CancelWriteReceiver receiver;
auto cache = MakeChunkCache();
cache->Write(
ChunkCache::WriteRequest{
{{},
(tensorstore::IdentityTransform(1) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3))
.value()},
0},
std::ref(receiver));
EXPECT_TRUE(receiver.set_value_called);
}
TEST_F(ChunkCacheTest, DriverDataType) {
grid = ChunkGridSpecification({
ChunkGridSpecification::Component{
AsyncWriteArray::Spec{SharedArray<const void>(MakeArray<int>({1, 2})),
Box<>(1)},
{2}},
ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
SharedArray<const void>(MakeArray<float>({{1, 2}, {3, 4}})),
Box<>(2)},
{2, 2},
{1}},
});
auto cache = MakeChunkCache();
EXPECT_EQ(tensorstore::dtype_v<int>, MakeDriver(cache, 0)->dtype());
EXPECT_EQ(tensorstore::dtype_v<float>, MakeDriver(cache, 1)->dtype());
}
TEST_F(ChunkCacheTest, ReadSingleComponentOneDimensionalExisting) {
grid = GetSimple1DGrid();
SetChunk({1}, {MakeArray<int>({42, 43})});
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 4, 5})));
}
SetChunk({2}, {MakeArray<int>({44, 45})});
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 44, 45})));
}
}
TEST_F(ChunkCacheTest, TwoDimensional) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
MakeSequentialArray<int>(BoxView<>({0, 0}, {10, 100})), Box<>(2)},
{2, 3},
{1, 0}}});
auto cache = MakeChunkCache();
auto read_future = tensorstore::Read(
GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0, 1).TranslateSizedInterval({1, 5}, {6, 5}));
for (auto cell_indices : std::vector<std::vector<Index>>{{1, 0},
{1, 1},
{1, 2},
{1, 3},
{2, 0},
{2, 1},
{2, 2},
{2, 3},
{3, 0},
{3, 1},
{3, 2},
{3, 3}}) {
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ::testing::ElementsAreArray(cell_indices));
r(memory_store);
}
EXPECT_THAT(read_future.result(), ::testing::Optional(MakeArray<int>({
{105, 106, 107, 108, 109},
{205, 206, 207, 208, 209},
{305, 306, 307, 308, 309},
{405, 406, 407, 408, 409},
{505, 506, 507, 508, 509},
{605, 606, 607, 608, 609},
})));
}
TEST_F(ChunkCacheTest, ReadRequestErrorBasic) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r.promise.SetResult(absl::UnknownError("Test read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading .*: Test read error"));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r.promise.SetResult(absl::UnknownError("Test read error 2"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading .*: Test read error 2"));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
}
TEST_F(ChunkCacheTest, WriteSingleComponentOneDimensional) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(6, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(3));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({6, 7})));
}
auto write_future =
tensorstore::Write(MakeArray<int>({13, 14, 15, 16}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(3, 4));
write_future.Force();
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
std::vector<std::pair<std::vector<Index>, StorageGeneration>> write_requests;
for (size_t i = 0; i < 3; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(
write_requests,
::testing::UnorderedElementsAre(
::testing::Pair(ElementsAre(2), StorageGeneration::Unknown()),
::testing::Pair(ElementsAre(3), StorageGeneration::NoValue()),
::testing::Pair(ElementsAre(1), StorageGeneration::NoValue())));
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 13})));
EXPECT_THAT(GetChunk({2}), ElementsAre(MakeArray<int>({14, 15})));
EXPECT_THAT(GetChunk({3}), ElementsAre(MakeArray<int>({16, 7})));
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest, WriteSingleComponentOneDimensionalCacheDisabled) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache("", CachePool::StrongPtr{});
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(6, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(3));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({6, 7})));
}
auto write_future =
tensorstore::Write(MakeArray<int>({13, 14, 15, 16}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(3, 4));
write_future.Force();
{
std::vector<std::vector<Index>> read_requests;
for (size_t i = 0; i < 2; ++i) {
auto r = mock_store->read_requests.pop();
read_requests.emplace_back(ParseKey(r.key));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(read_requests, ::testing::UnorderedElementsAre(ElementsAre(1),
ElementsAre(3)));
}
{
std::vector<std::pair<std::vector<Index>, StorageGeneration>>
write_requests;
for (size_t i = 0; i < 3; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(
write_requests,
::testing::UnorderedElementsAre(
::testing::Pair(ElementsAre(2), StorageGeneration::Unknown()),
::testing::Pair(ElementsAre(3), StorageGeneration::NoValue()),
::testing::Pair(ElementsAre(1), StorageGeneration::NoValue())));
}
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 13})));
EXPECT_THAT(GetChunk({2}), ElementsAre(MakeArray<int>({14, 15})));
EXPECT_THAT(GetChunk({3}), ElementsAre(MakeArray<int>({16, 7})));
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest,
WriteSingleComponentOneDimensionalWithTransactionCacheDisabled) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache("", CachePool::StrongPtr{});
Transaction transaction(tensorstore::isolated);
auto write_future = tensorstore::Write(
MakeArray<int>({13, 14, 15, 16}),
      GetTensorStore(cache, /*data_staleness=*/{}, /*component_index=*/0,
                     transaction) |
tensorstore::Dims(0).TranslateSizedInterval(3, 4));
TENSORSTORE_EXPECT_OK(write_future.result());
{
auto read_future = tensorstore::Read(
        GetTensorStore(cache, /*data_staleness=*/absl::InfinitePast(),
                       /*component_index=*/0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(6, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(3));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({16, 7})));
}
auto commit_future = transaction.CommitAsync();
{
std::vector<std::vector<Index>> read_requests;
for (size_t i = 0; i < 1; ++i) {
auto r = mock_store->read_requests.pop();
read_requests.emplace_back(ParseKey(r.key));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(read_requests, ::testing::UnorderedElementsAre(ElementsAre(1)));
}
{
std::vector<std::pair<std::vector<Index>, StorageGeneration>>
write_requests;
for (size_t i = 0; i < 3; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(
write_requests,
::testing::UnorderedElementsAre(
::testing::Pair(ElementsAre(2), StorageGeneration::Unknown()),
::testing::Pair(ElementsAre(3), StorageGeneration::NoValue()),
::testing::Pair(ElementsAre(1), StorageGeneration::NoValue())));
}
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 13})));
EXPECT_THAT(GetChunk({2}), ElementsAre(MakeArray<int>({14, 15})));
EXPECT_THAT(GetChunk({3}), ElementsAre(MakeArray<int>({16, 7})));
TENSORSTORE_EXPECT_OK(commit_future);
}
TEST_F(ChunkCacheTest, WriteAfterReadWithTransactionCacheDisabled) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache("", CachePool::StrongPtr{});
Transaction transaction(tensorstore::isolated);
{
auto read_future = tensorstore::Read(
        GetTensorStore(cache, absl::InfinitePast(), 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({0, 1})));
}
auto write_future = tensorstore::Write(
MakeArray<int>({13}),
      GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 1));
TENSORSTORE_EXPECT_OK(write_future.result());
auto commit_future = transaction.CommitAsync();
{
std::vector<std::pair<std::vector<Index>, StorageGeneration>>
write_requests;
for (size_t i = 0; i < 1; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(write_requests,
::testing::UnorderedElementsAre(::testing::Pair(
ElementsAre(0), StorageGeneration::NoValue())));
}
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({13, 1})));
TENSORSTORE_EXPECT_OK(commit_future);
}
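// The next two tests exercise fill-value elision: when a full overwrite
// leaves a chunk exactly equal to the fill value, writeback deletes the
// chunk (or never stores it), so HasChunk({1}) is false afterwards.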
TEST_F(ChunkCacheTest, OverwriteMissingWithFillValue) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
auto write_future =
tensorstore::Write(MakeArray<int>({2, 3}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_FALSE(HasChunk({1}));
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest, OverwriteExistingWithFillValue) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
{
auto write_future = tensorstore::Write(
MakeArray<int>({3, 4}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_TRUE(HasChunk({1}));
}
{
auto write_future = tensorstore::Write(
MakeArray<int>({2, 3}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_FALSE(HasChunk({1}));
TENSORSTORE_EXPECT_OK(write_future);
}
}
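// Fill-value comparison is bitwise ("identically equal"): NaN matches NaN,
// and -0.0 is distinct from +0.0. Writing {NAN, +0.0} therefore does not
// match the fill value {NAN, -0.0} and the chunk is stored; the follow-up
// write of {NAN, -0.0} matches it and removes the chunk.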
TEST_F(ChunkCacheTest, FillValueIdenticallyEqual) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
SharedArray<const void>(MakeArray<float>({NAN, -0.0, NAN, -0.0})),
Box<>(1)},
{2}}});
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
{
auto write_future = tensorstore::Write(
MakeArray<float>({NAN, +0.0}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_TRUE(HasChunk({1}));
}
{
auto write_future = tensorstore::Write(
MakeArray<float>({NAN, -0.0}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_FALSE(HasChunk({1}));
TENSORSTORE_EXPECT_OK(write_future);
}
}
TEST_F(ChunkCacheTest, DeleteAfterNormalWriteback) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
{
auto write_future = tensorstore::Write(
MakeArray<int>({3, 4}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_TRUE(HasChunk({1}));
}
auto write_future = cell_entry->Delete({});
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_FALSE(HasChunk({1}));
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest, PartialWriteAfterWrittenBackDelete) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
EXPECT_EQ(
nullptr,
ChunkCache::GetReadComponent(
AsyncCache::ReadLock<ChunkCache::ReadData>(*cell_entry).data(), 0)
.data());
auto write_future = cell_entry->Delete({});
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_FALSE(HasChunk({1}));
TENSORSTORE_EXPECT_OK(write_future);
{
auto write_future = tensorstore::Write(
MakeArray<int>({42}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(2, 1));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray({42, 3})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({42, 3})));
}
}
TEST_F(ChunkCacheTest, DeleteWithPendingRead) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(2, 2));
auto r = mock_store->read_requests.pop();
auto write_future = cell_entry->Delete({});
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({2, 3})));
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest, WriteToMaskedArrayError) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
MakeSequentialArray<int>(BoxView<>{{0, 0}, {10, 10}}), Box<>(2)},
{2, 2},
{0}}});
auto cache = MakeChunkCache();
auto cell_entry = GetEntryForGridCell(*cache, span<const Index>({1}));
auto write_future = tensorstore::Write(
MakeArray<int>({5, 6}),
GetTensorStore(cache)
| tensorstore::Dims(1)
.IndexArraySlice(MakeArray<Index>({2, 2}))
.MoveToBack()
| tensorstore::Dims(0).IndexSlice(2));
EXPECT_THAT(write_future.result(),
MatchesStatus(absl::StatusCode::kOutOfRange));
auto read_future = tensorstore::Read(
GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0, 1).TranslateSizedInterval({2, 0}, {2, 2}));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({{20, 21}, {30, 31}})));
}
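// Simulates a concurrent writer: SetChunk changes the stored generation
// between the read and the conditional writeback, so the first write
// (conditioned on if_equal) fails, forcing a re-read and a second,
// successful conditional write.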
TEST_F(ChunkCacheTest, WriteGenerationMismatch) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto write_future =
tensorstore::Write(MakeArray<int>({3}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(3, 1));
write_future.Force();
SetChunk({1}, {MakeArray({5, 6})});
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
SetChunk({1}, {MakeArray({7, 8})});
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_NE(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_NE(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray({7, 3})));
}
TEST_F(ChunkCacheTest, ModifyDuringWriteback) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
SharedArray<const void>(MakeArray<int>({1, 2, 3, 4})), Box<>(1)},
{4}}});
auto cache = MakeChunkCache();
auto write_future = tensorstore::Write(
MakeArray<int>({5, 6}),
GetTensorStore(cache) |
tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({1, 3})));
write_future.Force();
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
Future<const void> write_future2;
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_equal);
write_future2 =
tensorstore::Write(
MakeArray<int>({7}),
GetTensorStore(cache) |
tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({2})))
.commit_future;
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray({1, 5, 3, 6})));
SetChunk({0}, {MakeArray({10, 11, 12, 13})});
write_future2.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_NE(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_NE(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future2);
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray({10, 11, 7, 13})));
}
TEST_F(ChunkCacheTest, FullyOverwritePartialChunk) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{MakeSequentialArray<int>(BoxView<>{{0}, {10}}),
Box<>({1}, {5})},
{4}}});
auto cache = MakeChunkCache();
{
auto write_future = tensorstore::Write(
MakeArray<int>({11, 12, 13}),
GetTensorStore(cache) | tensorstore::Dims(0).HalfOpenInterval(1, 4));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray({0, 11, 12, 13})));
}
TENSORSTORE_ASSERT_OK(write_future);
}
{
auto write_future = tensorstore::Write(
MakeArray<int>({14, 15}),
GetTensorStore(cache) | tensorstore::Dims(0).HalfOpenInterval(4, 6));
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray({14, 15, 6, 7})));
}
TENSORSTORE_ASSERT_OK(write_future);
}
}
TEST_F(ChunkCacheTest, WritebackError) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
auto write_future =
tensorstore::Write(
MakeArray<int>({3, 4}),
GetTensorStore(cache) | tensorstore::Dims(0).SizedInterval(0, 2))
.commit_future;
write_future.Force();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r.promise.SetResult(absl::UnknownError("Writeback error"));
}
EXPECT_THAT(write_future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error writing .*: Writeback error"));
}
class ChunkCacheTransactionalTest : public ChunkCacheTest,
public ::testing::WithParamInterface<bool> {
protected:
bool UseTransaction() const { return GetParam(); }
};
INSTANTIATE_TEST_SUITE_P(Instantiation, ChunkCacheTransactionalTest,
::testing::Bool());
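// Each TEST_P below runs twice: once without a transaction (writes are
// committed via Force) and once inside an isolated transaction committed
// explicitly, verifying that chunk-to-chunk copies behave the same way in
// both modes.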
TEST_P(ChunkCacheTransactionalTest, SelfCopyDifferentChunksNoExistingData) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
Transaction transaction{no_transaction};
if (UseTransaction()) {
transaction = Transaction(tensorstore::isolated);
}
auto write_future =
tensorstore::Copy(GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(0, 1),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(3, 1));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
write_future.Force();
if (UseTransaction()) {
transaction.CommitAsync().IgnoreFuture();
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
if (UseTransaction()) {
TENSORSTORE_EXPECT_OK(transaction.future());
}
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 0})));
}
TEST_P(ChunkCacheTransactionalTest, SelfCopyDifferentChunksWithExistingData) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
Transaction transaction{no_transaction};
if (UseTransaction()) {
transaction = Transaction(tensorstore::isolated);
}
auto write_future1 =
tensorstore::Write(MakeArray<int>({42, 43}),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
if (!UseTransaction()) {
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
TENSORSTORE_EXPECT_OK(write_future1);
} else {
TENSORSTORE_EXPECT_OK(write_future1.copy_future);
}
auto write_future2 =
tensorstore::Copy(GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(0, 1),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(3, 1));
if (!UseTransaction()) {
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future2.copy_future);
write_future1.Force();
write_future2.Force();
if (UseTransaction()) {
transaction.CommitAsync().IgnoreFuture();
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
if (UseTransaction()) {
for (size_t i = 0; i < 2; ++i) {
auto r = mock_store->write_requests.pop();
r(memory_store);
}
} else {
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future1);
TENSORSTORE_EXPECT_OK(write_future2);
if (UseTransaction()) {
TENSORSTORE_EXPECT_OK(transaction.future());
}
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({42, 43})));
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 42})));
}
TEST_P(ChunkCacheTransactionalTest, SelfCopySameChunkNoExistingData) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
Transaction transaction{no_transaction};
if (UseTransaction()) {
transaction = Transaction(tensorstore::isolated);
}
auto write_future =
tensorstore::Copy(GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(0, 1),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(1, 1));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
write_future.Force();
if (UseTransaction()) {
transaction.CommitAsync().IgnoreFuture();
}
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
if (UseTransaction()) {
TENSORSTORE_EXPECT_OK(transaction.future());
}
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({0, 0})));
}
TEST_P(ChunkCacheTransactionalTest, SelfCopySameChunkWithExistingData) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
Transaction transaction{no_transaction};
if (UseTransaction()) {
transaction = Transaction(tensorstore::isolated);
}
auto write_future1 =
tensorstore::Write(MakeArray<int>({42, 43}),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
TENSORSTORE_EXPECT_OK(write_future1.copy_future);
if (!UseTransaction()) {
write_future1.Force();
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
TENSORSTORE_EXPECT_OK(write_future1);
}
auto write_future2 =
tensorstore::Copy(GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(0, 1),
GetTensorStore(cache, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(1, 1));
if (!UseTransaction()) {
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future2.copy_future);
write_future1.Force();
write_future2.Force();
if (UseTransaction()) {
transaction.CommitAsync().IgnoreFuture();
}
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future1);
TENSORSTORE_EXPECT_OK(write_future2);
if (UseTransaction()) {
TENSORSTORE_EXPECT_OK(transaction.future());
}
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({42, 42})));
}
TEST_F(ChunkCacheTest, SelfCopySameChunkSeparateCachesWithExistingData) {
grid = GetSimple1DGrid();
auto cache1 = MakeChunkCache();
auto cache2 = MakeChunkCache();
Transaction transaction(tensorstore::isolated);
auto write_future1 =
tensorstore::Write(MakeArray<int>({42, 43}),
GetTensorStore(cache1, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
TENSORSTORE_EXPECT_OK(write_future1.copy_future);
auto write_future2 =
tensorstore::Copy(GetTensorStore(cache1, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(0, 1),
GetTensorStore(cache2, {}, 0, transaction) |
tensorstore::Dims(0).SizedInterval(1, 1));
TENSORSTORE_EXPECT_OK(write_future2.copy_future);
write_future1.Force();
write_future2.Force();
transaction.CommitAsync().IgnoreFuture();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future1);
TENSORSTORE_EXPECT_OK(write_future2);
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({42, 42})));
}
TEST_F(ChunkCacheTest, SeparateCachesTransactionalReadThenWrite) {
grid = GetSimple1DGrid();
auto cache1 = MakeChunkCache();
auto cache2 = MakeChunkCache();
Transaction transaction(tensorstore::isolated);
auto read_future =
tensorstore::Read(GetTensorStore(cache1, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({0, 1})));
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<int>({42, 43}),
GetTensorStore(cache2, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2)));
transaction.CommitAsync().IgnoreFuture();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({42, 43})));
}
TEST_F(ChunkCacheTest, SeparateCachesTransactionalWriteThenRead) {
grid = GetSimple1DGrid();
auto cache1 = MakeChunkCache();
auto cache2 = MakeChunkCache();
Transaction transaction(tensorstore::isolated);
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<int>({42, 43}),
GetTensorStore(cache1, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2)));
EXPECT_THAT(
tensorstore::Read(GetTensorStore(cache2, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2))
.result(),
::testing::Optional(MakeArray<int>({42, 43})));
transaction.CommitAsync().IgnoreFuture();
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(GetChunk({0}), ElementsAre(MakeArray<int>({42, 43})));
}
TEST_F(ChunkCacheTest, SeparateCachesReadIfNotEqualAbort) {
grid = GetSimple1DGrid();
auto cache1 = MakeChunkCache();
auto cache2 = MakeChunkCache();
Transaction transaction(tensorstore::isolated);
{
auto read_future =
tensorstore::Read(GetTensorStore(cache1, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({0, 1})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache2, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({0, 1})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache2, {}, 0, transaction) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MakeArray<int>({0, 1})));
}
}
TENSORSTORE_GLOBAL_INITIALIZER {
tensorstore::internal::TensorStoreRepeatableReadTestOptions options;
options.test_suite_name = "RepeatableReadTest";
options.fill_value = MakeArray<int>({1, 2});
options.value1 = MakeArray<int>({3, 4});
options.value2 = MakeArray<int>({5, 6});
options.value3 = MakeArray<int>({7, 8});
options.key = EncodeKey({{0}});
auto grid = std::make_shared<ChunkGridSpecification>(
ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{SharedArray<const void>(options.fill_value),
Box<>(1)},
{2}}}));
options.encode_value =
[=](SharedArray<const void> value) -> Result<std::optional<absl::Cord>> {
return EncodeRaw(*grid, {value});
};
options.make_tensorstore =
[=](const tensorstore::Context& context) -> Result<TensorStore<>> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto mock_key_value_store_resource,
context
.GetResource<tensorstore::internal::MockKeyValueStoreResource>());
auto thread_pool = tensorstore::internal::DetachedThreadPool(1);
const auto& mock_store = *mock_key_value_store_resource;
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetCache<TestCache>(pool.get(), "", [&] {
return std::make_unique<TestCache>(mock_store, *grid, thread_pool);
});
return tensorstore::internal::TensorStoreAccess::Construct<TensorStore<>>(
tensorstore::internal::Driver::Handle{
MakeDriver(cache), tensorstore::IdentityTransform(1)}) |
tensorstore::Dims(0).TranslateSizedInterval(0, 2);
};
tensorstore::internal::RegisterTensorStoreRepeatableReadTest(options);
}
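// Verifies the `can_reference_source_data_indefinitely` write option: when
// it is set and neither side of the copy goes through an index array (so a
// full chunk is written contiguously), the encoded value aliases the
// caller's buffer (zero copy); in every other combination the data must be
// copied.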
TEST_F(ChunkCacheTest, CanReferenceSourceDataIndefinitely) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
SharedArray<const void>(tensorstore::AllocateArray<int32_t>(
{64}, tensorstore::c_order, tensorstore::value_init)),
Box<>(1)},
{64}}});
const Index size = 64;
auto index_array = tensorstore::AllocateArray<int64_t>({size});
for (Index i = 0; i < size; ++i) {
index_array(i) = i;
}
for (bool use_index_array_for_store : {false, true}) {
SCOPED_TRACE(absl::StrFormat("use_index_array_for_store=%d",
use_index_array_for_store));
for (bool use_index_array_for_source : {false, true}) {
SCOPED_TRACE(absl::StrFormat("use_index_array_for_source=%d",
use_index_array_for_source));
for (bool reference_source_data : {false, true}) {
SCOPED_TRACE(
absl::StrFormat("reference_source_data=%d", reference_source_data));
auto cache = MakeChunkCache();
auto store = GetTensorStore(cache);
auto full_chunk = tensorstore::AllocateArray<int32_t>({size});
std::fill_n(full_chunk.data(), full_chunk.num_elements(), 42);
tensorstore::TransformedSharedArray<const void> source_array =
full_chunk;
if (use_index_array_for_source) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
source_array,
source_array |
tensorstore::Dims(0).OuterIndexArraySlice(index_array));
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
store, store | tensorstore::Dims(0).SizedInterval(0, size));
if (use_index_array_for_store) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
store,
store | tensorstore::Dims(0).OuterIndexArraySlice(index_array));
}
auto write_future = tensorstore::Write(
source_array, store,
(reference_source_data
? tensorstore::can_reference_source_data_indefinitely
: tensorstore::cannot_reference_source_data));
const bool zero_copy = reference_source_data &&
!use_index_array_for_store &&
!use_index_array_for_source;
{
auto r = mock_store->write_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(0));
ASSERT_TRUE(r.value);
EXPECT_EQ(*r.value, EncodeRaw(*grid, {full_chunk}));
auto flat = r.value->TryFlat();
ASSERT_TRUE(flat);
EXPECT_EQ(zero_copy, (flat->data() == reinterpret_cast<const char*>(
full_chunk.data())));
r(memory_store);
}
TENSORSTORE_EXPECT_OK(write_future);
}
}
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/chunk_cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/chunk_cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c84094b4-b932-4a99-bebd-df271b170370 | cpp | google/tensorstore | bytes | tensorstore/driver/zarr3/codec/bytes.cc | tensorstore/driver/zarr3/codec/bytes_test.cc | #include "tensorstore/driver/zarr3/codec/bytes.h"
#include <assert.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
absl::Status InvalidDataTypeError(DataType dtype) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Data type ", dtype, " not compatible with \"bytes\" codec"));
}
class BytesCodec : public ZarrArrayToBytesCodec {
public:
explicit BytesCodec(DataType decoded_dtype, endian endianness)
: dtype_(decoded_dtype), endianness_(endianness) {}
Result<PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final;
private:
DataType dtype_;
endian endianness_;
};
}
absl::Status BytesCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
if (array_info.dtype.valid() &&
!internal::IsTrivialDataType(array_info.dtype)) {
return InvalidDataTypeError(array_info.dtype);
}
const DimensionIndex rank = array_info.rank;
if (rank != dynamic_rank) {
auto& inner_order = decoded.inner_order.emplace();
for (DimensionIndex i = 0; i < rank; ++i) {
inner_order[i] = i;
}
}
if (array_info.shape) {
auto& shape = *array_info.shape;
auto& read_chunk_shape = decoded.read_chunk_shape.emplace();
for (DimensionIndex i = 0; i < rank; ++i) {
read_chunk_shape[i] = shape[i];
}
}
return absl::OkStatus();
}
bool BytesCodecSpec::SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const {
if (!decoded.inner_order) return true;
if (PermutationMatchesOrder(span(decoded.inner_order->data(), decoded.rank),
c_order)) {
return true;
}
SetPermutation(c_order, preferred_inner_order);
return false;
}
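// The "bytes" codec only encodes elements in C (lexicographic) order. When
// a different inner order is requested, SupportsInnerOrder reports c_order
// as the preferred order so that the codec chain can prepend a "transpose"
// codec (see the AutomaticTranspose case in bytes_test.cc).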
Result<ZarrArrayToBytesCodec::Ptr> BytesCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
assert(decoded.dtype.valid());
if (!internal::IsTrivialDataType(decoded.dtype)) {
return InvalidDataTypeError(decoded.dtype);
}
const bool is_endian_invariant =
internal::IsEndianInvariantDataType(decoded.dtype);
if (!options.constraints && !is_endian_invariant && !options.endianness) {
return absl::InvalidArgumentError(
tensorstore::StrCat("\"bytes\" codec requires that \"endian\" option "
"is specified for data type ",
decoded.dtype));
}
encoded.item_bits = decoded.dtype.size() * 8;
DimensionIndex rank = decoded.rank;
if (decoded.codec_chunk_shape) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "\"bytes\" codec does not support codec_chunk_shape (",
        span<const Index>(decoded.codec_chunk_shape->data(), rank),
        ") was specified"));
}
if (decoded.inner_order) {
auto& decoded_inner_order = *decoded.inner_order;
for (DimensionIndex i = 0; i < rank; ++i) {
if (decoded_inner_order[i] != i) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"\"bytes\" codec does not support inner_order of ",
span<const DimensionIndex>(decoded_inner_order.data(), rank)));
}
}
}
endian resolved_endianness = options.endianness.value_or(endian::native);
if (resolved_spec) {
resolved_spec->reset(new BytesCodecSpec(Options{
is_endian_invariant ? std::optional<endian>()
: std::optional<endian>(resolved_endianness)}));
}
return internal::MakeIntrusivePtr<BytesCodec>(decoded.dtype,
resolved_endianness);
}
namespace {
namespace jb = ::tensorstore::internal_json_binding;
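// Maps the JSON strings "little"/"big" of the codec's "endian" member to
// tensorstore::endian values; the spec's JSON form is, e.g.:
//   {"name": "bytes", "configuration": {"endian": "little"}}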
constexpr auto EndiannessBinder() {
return jb::Enum<endian, std::string_view>({
{endian::little, "little"},
{endian::big, "big"},
});
}
}
absl::Status BytesCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = BytesCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::endianness>(
"endian", options, other_options, EndiannessBinder()));
return absl::OkStatus();
}
ZarrCodecSpec::Ptr BytesCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<BytesCodecSpec>(*this);
}
namespace {
class BytesCodecPreparedState : public ZarrArrayToBytesCodec::PreparedState {
public:
int64_t encoded_size() const final { return encoded_size_; }
absl::Status EncodeArray(SharedArrayView<const void> decoded,
riegeli::Writer& writer) const final {
if (internal::EncodeArrayEndian(std::move(decoded), endianness_, c_order,
writer)) {
return absl::OkStatus();
}
assert(!writer.ok());
return writer.status();
}
Result<SharedArray<const void>> DecodeArray(
span<const Index> decoded_shape, riegeli::Reader& reader) const final {
return internal::DecodeArrayEndian(reader, dtype_, decoded_shape,
endianness_, c_order);
}
DataType dtype_;
endian endianness_;
int64_t encoded_size_;
};
}
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> BytesCodec::Prepare(
span<const Index> decoded_shape) const {
int64_t bytes = dtype_.size();
for (auto size : decoded_shape) {
if (internal::MulOverflow(size, bytes, &bytes)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing encoded size of array of shape ",
decoded_shape));
}
}
auto state = internal::MakeIntrusivePtr<BytesCodecPreparedState>();
state->dtype_ = dtype_;
state->endianness_ = endianness_;
state->encoded_size_ = bytes;
return state;
}
internal::IntrusivePtr<const BytesCodecSpec> DefaultBytesCodec() {
return internal::MakeIntrusivePtr<BytesCodecSpec>(
BytesCodecSpec::Options{endian::native});
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = BytesCodecSpec;
using Options = Self::Options;
RegisterCodec<Self>(
"bytes",
jb::Projection<&Self::options>(jb::Sequence(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
obj->constraints = options.constraints;
}
return absl::OkStatus();
},
jb::Member("endian",
jb::Projection<&Options::endianness>(
                                    jb::Optional(EndiannessBinder()))))));
}
}
} | #include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecResolve;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(BytesTest, SpecRoundTrip) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {"bytes"};
p.expected_spec = ::nlohmann::json::array_t{GetDefaultBytesCodecJson()};
TestCodecSpecRoundTrip(p);
}
TEST(BytesTest, DuplicateArrayToBytes) {
EXPECT_THAT(
ZarrCodecChainSpec::FromJson({
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected bytes -> bytes codec, but received: .*"));
}
TEST(BytesTest, RoundTrip) {
CodecRoundTripTestParams p;
p.spec = {"bytes"};
TestCodecRoundTrip(p);
}
TEST(BytesTest, AutomaticTranspose) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
auto& inner_order = p.inner_order.emplace();
inner_order[0] = 1;
inner_order[1] = 0;
EXPECT_THAT(
TestCodecSpecResolve(
::nlohmann::json::array_t{GetDefaultBytesCodecJson()}, p),
::testing::Optional(MatchesJson({
{{"name", "transpose"}, {"configuration", {{"order", {1, 0}}}}},
GetDefaultBytesCodecJson(),
})));
}
TEST(BytesTest, EndianInvariantDataType) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint8_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
false),
::testing::Optional(
MatchesJson(::nlohmann::json::array_t{{{"name", "bytes"}}})));
}
TEST(BytesTest, MissingEndianNotEndianInvariantDataType) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
false),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: \"bytes\" codec requires that \"endian\" option is "
"specified for data type uint16"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/bytes.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/bytes_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f811cbd2-6439-4e77-8c12-22dfc521e799 | cpp | google/tensorstore | transpose | tensorstore/index_space/internal/transpose.cc | tensorstore/index_space/transpose_test.cc | #include "tensorstore/index_space/internal/transpose.h"
#include <cassert>
#include <cstring>
#include <utility>
#include "absl/container/fixed_array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/rank.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
template <typename Source, typename Dest>
void PermuteArray(Source orig_array, Dest new_array,
span<const DimensionIndex> new_to_orig_map) {
assert(orig_array.size() == new_array.size());
assert(orig_array.size() == new_to_orig_map.size());
for (std::ptrdiff_t i = 0; i < orig_array.size(); ++i) {
new_array[i] = orig_array[new_to_orig_map[i]];
}
}
template <typename Source, typename Temp>
void PermuteArrayInPlace(Source array, Temp temp_array,
span<const DimensionIndex> new_to_orig_map) {
assert(array.size() == temp_array.size());
assert(array.size() == new_to_orig_map.size());
for (DimensionIndex i = 0; i < array.size(); ++i) {
temp_array[i] = array[i];
}
PermuteArray(temp_array, array, new_to_orig_map);
}
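// Example: with orig_array = {a, b, c} and new_to_orig_map = {2, 0, 1},
// PermuteArray yields new_array = {c, a, b}; PermuteArrayInPlace produces
// the same result in place, using the caller-provided scratch buffer.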
TransformRep::Ptr<> PermuteDimsOutOfPlace(
TransformRep* original, span<const DimensionIndex> permutation,
bool domain_only) {
const DimensionIndex input_rank = original->input_rank;
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
assert(permutation.size() == input_rank);
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
DimensionIndex inverse_dimension_map[kMaxRank];
assert(IsValidPermutation(permutation));
for (DimensionIndex new_input_dim = 0; new_input_dim < input_rank;
++new_input_dim) {
const DimensionIndex orig_input_dim = permutation[new_input_dim];
assert(orig_input_dim >= 0 && orig_input_dim < input_rank);
result->input_dimension(new_input_dim) =
original->input_dimension(orig_input_dim);
inverse_dimension_map[orig_input_dim] = new_input_dim;
}
span<const OutputIndexMap> original_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& result_map = result_maps[output_dim];
const auto& orig_map = original_maps[output_dim];
result_map.offset() = orig_map.offset();
result_map.stride() = orig_map.stride();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < input_rank);
const DimensionIndex new_input_dim =
inverse_dimension_map[orig_input_dim];
assert(new_input_dim >= 0 && new_input_dim < input_rank);
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
auto& result_index_array_data = result_map.SetArrayIndexing(input_rank);
const auto& orig_index_array_data = orig_map.index_array_data();
result_index_array_data.element_pointer =
orig_index_array_data.element_pointer;
result_index_array_data.index_range = orig_index_array_data.index_range;
PermuteArray(span(orig_index_array_data.byte_strides, input_rank),
span(result_index_array_data.byte_strides, input_rank),
permutation);
break;
}
}
}
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
TransformRep::Ptr<> PermuteDimsInplace(TransformRep::Ptr<> rep,
span<const DimensionIndex> permutation,
bool domain_only) {
if (domain_only) {
ResetOutputIndexMaps(rep.get());
}
const DimensionIndex input_rank = rep->input_rank;
const DimensionIndex output_rank = rep->output_rank;
assert(permutation.size() == input_rank);
DimensionIndex inverse_dimension_map[kMaxRank];
InvertPermutation(input_rank, permutation.data(), inverse_dimension_map);
{
absl::FixedArray<IndexDomainDimension<container>, kMaxRank> temp_array(
input_rank);
PermuteArrayInPlace(rep->all_input_dimensions(input_rank), span(temp_array),
permutation);
}
{
const span<OutputIndexMap> maps =
rep->output_index_maps().first(output_rank);
DimensionIndex temp_index_array[kMaxRank];
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
auto& map = maps[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension:
map.SetSingleInputDimension(
inverse_dimension_map[map.input_dimension()]);
break;
case OutputIndexMethod::array: {
auto& index_array_data = map.index_array_data();
PermuteArrayInPlace(
span(index_array_data.byte_strides, input_rank),
span<DimensionIndex>(&temp_index_array[0], input_rank),
permutation);
break;
}
}
}
}
internal_index_space::DebugCheckInvariants(rep.get());
return rep;
}
TransformRep::Ptr<> PermuteOutputDimsInplace(
TransformRep::Ptr<> rep, span<const DimensionIndex> permutation) {
const DimensionIndex output_rank = rep->output_rank;
assert(permutation.size() == output_rank);
alignas(OutputIndexMap) char temp_buf[sizeof(OutputIndexMap) * kMaxRank];
OutputIndexMap* output_index_maps = rep->output_index_maps().data();
OutputIndexMap* temp_index_maps = reinterpret_cast<OutputIndexMap*>(temp_buf);
memcpy(static_cast<void*>(temp_index_maps),
static_cast<const void*>(output_index_maps),
sizeof(OutputIndexMap) * output_rank);
for (DimensionIndex new_output_dim = 0; new_output_dim < output_rank;
++new_output_dim) {
const DimensionIndex old_output_dim = permutation[new_output_dim];
memcpy(static_cast<void*>(&output_index_maps[new_output_dim]),
static_cast<const void*>(&temp_index_maps[old_output_dim]),
sizeof(OutputIndexMap));
}
internal_index_space::DebugCheckInvariants(rep.get());
return rep;
}
TransformRep::Ptr<> PermuteOutputDimsOutOfPlace(
TransformRep* original, span<const DimensionIndex> permutation) {
const DimensionIndex input_rank = original->input_rank;
const DimensionIndex output_rank = original->output_rank;
assert(permutation.size() == output_rank);
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
internal_index_space::CopyTransformRepDomain(original, result.get());
const OutputIndexMap* old_output_index_maps =
original->output_index_maps().data();
OutputIndexMap* new_output_index_maps = result->output_index_maps().data();
for (DimensionIndex new_output_dim = 0; new_output_dim < output_rank;
++new_output_dim) {
const DimensionIndex old_output_dim = permutation[new_output_dim];
new_output_index_maps[new_output_dim].Assign(
input_rank, old_output_index_maps[old_output_dim]);
}
return result;
}
}
TransformRep::Ptr<> TransposeInputDimensions(
TransformRep::Ptr<> transform, span<const DimensionIndex> permutation,
bool domain_only) {
if (!transform) return {};
if (transform->is_unique()) {
return PermuteDimsInplace(std::move(transform), permutation, domain_only);
} else {
return PermuteDimsOutOfPlace(transform.get(), permutation, domain_only);
}
}
TransformRep::Ptr<> TransposeInputDimensions(TransformRep::Ptr<> transform,
bool domain_only) {
if (!transform) return {};
DimensionIndex permutation[kMaxRank];
const DimensionIndex rank = transform->input_rank;
for (DimensionIndex i = 0; i < rank; ++i) {
permutation[i] = rank - i - 1;
}
return TransposeInputDimensions(
std::move(transform), span<const DimensionIndex>(&permutation[0], rank),
domain_only);
}
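// When no explicit permutation is supplied, the input dimensions are simply
// reversed (permutation[i] = rank - 1 - i); the output-dimension overloads
// below follow the same convention.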
TransformRep::Ptr<> TransposeOutputDimensions(
TransformRep::Ptr<> transform, span<const DimensionIndex> permutation) {
if (!transform) return {};
if (transform->is_unique()) {
return PermuteOutputDimsInplace(std::move(transform), permutation);
} else {
return PermuteOutputDimsOutOfPlace(transform.get(), permutation);
}
}
TransformRep::Ptr<> TransposeOutputDimensions(TransformRep::Ptr<> transform) {
if (!transform) return {};
DimensionIndex permutation[kMaxRank];
const DimensionIndex rank = transform->output_rank;
for (DimensionIndex i = 0; i < rank; ++i) {
permutation[i] = rank - i - 1;
}
return TransposeOutputDimensions(
std::move(transform), span<const DimensionIndex>(&permutation[0], rank));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
namespace {
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
TEST(TransposeTest, Reverse) {
auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
auto original_domain = IndexDomainBuilder<3>()
.origin({1, 2, 3})
.shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.labels({"x", "y", "z"})
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({3, 2, 1})
.input_shape({2, 4, 3})
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"z", "y", "x"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
EXPECT_THAT(original_transform.Transpose(),
::testing::Eq(expected_new_transform));
EXPECT_THAT(std::move(original_transform).Transpose(),
::testing::Eq(expected_new_transform));
EXPECT_THAT(original_domain.Transpose(),
::testing::Eq(expected_new_transform.domain()));
EXPECT_THAT(std::move(original_domain).Transpose(),
::testing::Eq(expected_new_transform.domain()));
}
TEST(TransposeTest, Permutation) {
auto original_transform = IndexTransformBuilder(3, 3)
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
auto original_domain = IndexDomainBuilder(3)
.origin({1, 2, 3})
.shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.labels({"x", "y", "z"})
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder(3, 3)
.input_origin({3, 1, 2})
.input_shape({2, 3, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.input_labels({"z", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
EXPECT_THAT(original_transform.Transpose({{2, 0, 1}}),
::testing::Eq(expected_new_transform));
EXPECT_THAT(std::move(original_transform).Transpose({{2, 0, 1}}),
::testing::Eq(expected_new_transform));
EXPECT_THAT(original_domain.Transpose({{2, 0, 1}}),
::testing::Eq(expected_new_transform.domain()));
EXPECT_THAT(std::move(original_domain).Transpose({{2, 0, 1}}),
::testing::Eq(expected_new_transform.domain()));
}
TEST(TransposeTest, ReverseOutput) {
auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
EXPECT_THAT(original_transform.TransposeOutput(),
::testing::Eq(expected_new_transform));
EXPECT_THAT(std::move(original_transform).TransposeOutput(),
::testing::Eq(expected_new_transform));
}
TEST(TransposeTest, PermutationOutput) {
auto original_transform = IndexTransformBuilder(3, 3)
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder(3, 3)
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 1)
.Finalize()
.value();
EXPECT_THAT(original_transform.TransposeOutput({{2, 0, 1}}),
::testing::Eq(expected_new_transform));
EXPECT_THAT(std::move(original_transform).TransposeOutput({{2, 0, 1}}),
::testing::Eq(expected_new_transform));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transpose.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transpose_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d9364773-75c4-4fc1-aae1-4061a3fee721 | cpp | google/tensorstore | blosc | tensorstore/internal/compression/blosc.cc | tensorstore/internal/compression/blosc_test.cc | #include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace blosc {
Result<std::string> Encode(std::string_view input, const Options& options) {
if (input.size() > BLOSC_MAX_BUFFERSIZE) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Blosc compression input of ", input.size(),
" bytes exceeds maximum size of ", BLOSC_MAX_BUFFERSIZE));
}
std::string output(input.size() + BLOSC_MAX_OVERHEAD, '\0');
int shuffle = options.shuffle;
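  // shuffle == -1 selects an automatic default: bit-shuffle for single-byte
  // element types, byte-shuffle for everything else.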
if (shuffle == -1) {
shuffle = options.element_size == 1 ? BLOSC_BITSHUFFLE : BLOSC_SHUFFLE;
}
const int n = blosc_compress_ctx(
options.clevel, shuffle, options.element_size, input.size(), input.data(),
output.data(), output.size(), options.compressor, options.blocksize,
      /*numinternalthreads=*/1);
if (n < 0) {
return absl::InternalError(
tensorstore::StrCat("Internal blosc error: ", n));
}
output.erase(n);
return output;
}
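// Usage sketch (illustrative values; the Options field order follows the
// aggregate initializers in blosc_test.cc -- see the header for the
// authoritative definition):
//
//   blosc::Options opts{/*compressor=*/"lz4", /*clevel=*/5, /*shuffle=*/-1,
//                       /*blocksize=*/0, /*element_size=*/4};
//   TENSORSTORE_ASSIGN_OR_RETURN(std::string encoded,
//                                blosc::Encode(input, opts));
//   TENSORSTORE_ASSIGN_OR_RETURN(std::string decoded,
//                                blosc::Decode(encoded));  // decoded == input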
Result<std::string> Decode(std::string_view input) {
size_t nbytes;
if (blosc_cbuffer_validate(input.data(), input.size(), &nbytes) != 0) {
return absl::InvalidArgumentError("Invalid blosc-compressed data");
}
std::string output(nbytes, '\0');
if (nbytes > 0) {
const int n =
blosc_decompress_ctx(input.data(), output.data(), output.size(),
                             /*numinternalthreads=*/1);
if (n <= 0) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Blosc error: ", n));
}
}
return output;
}
}
} | #include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace blosc = tensorstore::blosc;
std::vector<blosc::Options> GetTestOptions() {
return {
blosc::Options{"lz4", 5, -1, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 0, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4hc", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 9, BLOSC_BITSHUFFLE, 0},
blosc::Options{"zlib", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"zstd", 1, BLOSC_SHUFFLE, 0},
blosc::Options{"blosclz", 1, BLOSC_BITSHUFFLE, 0},
blosc::Options{"snappy", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 256},
blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 256},
};
}
std::vector<std::string> GetTestArrays() {
std::vector<std::string> arrays;
arrays.emplace_back();
{
std::string arr(100, '\0');
unsigned char v = 0;
for (auto& x : arr) {
x = (v += 7);
}
arrays.push_back(std::move(arr));
}
arrays.push_back("The quick brown fox jumped over the lazy dog.");
return arrays;
}
TEST(BloscTest, EncodeDecode) {
for (blosc::Options options : GetTestOptions()) {
for (const auto& array : GetTestArrays()) {
for (const size_t element_size : {1, 2, 10}) {
options.element_size = element_size;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, blosc::Decode(encoded));
EXPECT_EQ(array, decoded);
}
}
}
}
TEST(BloscTest, CheckComplib) {
const std::string_view array =
"The quick brown fox jumped over the lazy dog.";
const std::vector<std::pair<std::string, std::string>>
cnames_and_complib_names{{BLOSC_BLOSCLZ_COMPNAME, BLOSC_BLOSCLZ_LIBNAME},
{BLOSC_LZ4_COMPNAME, BLOSC_LZ4_LIBNAME},
{BLOSC_LZ4HC_COMPNAME, BLOSC_LZ4_LIBNAME},
{BLOSC_SNAPPY_COMPNAME, BLOSC_SNAPPY_LIBNAME},
{BLOSC_ZLIB_COMPNAME, BLOSC_ZLIB_LIBNAME},
{BLOSC_ZSTD_COMPNAME, BLOSC_ZSTD_LIBNAME}};
for (const auto& pair : cnames_and_complib_names) {
blosc::Options options{pair.first.c_str(), 5,
-1, 0,
1};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
const char* complib = blosc_cbuffer_complib(encoded.data());
EXPECT_EQ(pair.second, complib);
}
}
TEST(BloscTest, CheckShuffleAndElementSize) {
const std::string_view array =
"The quick brown fox jumped over the lazy dog.";
for (int shuffle = -1; shuffle <= 2; ++shuffle) {
for (const size_t element_size : {1, 2, 10}) {
blosc::Options options{"lz4", 5,
shuffle, 0,
element_size};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
size_t typesize;
int flags;
blosc_cbuffer_metainfo(encoded.data(), &typesize, &flags);
EXPECT_EQ(element_size, typesize);
const bool expected_byte_shuffle =
shuffle == 1 || (shuffle == -1 && element_size != 1);
const bool expected_bit_shuffle =
shuffle == 2 || (shuffle == -1 && element_size == 1);
EXPECT_EQ(expected_byte_shuffle,
static_cast<bool>(flags & BLOSC_DOSHUFFLE));
EXPECT_EQ(expected_bit_shuffle,
static_cast<bool>(flags & BLOSC_DOBITSHUFFLE));
}
}
}
TEST(BloscTest, CheckBlocksize) {
const std::string array(100000, '\0');
for (size_t blocksize : {256, 512, 1024}) {
blosc::Options options{"lz4", 0,
0, blocksize,
1};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
size_t nbytes, cbytes, bsize;
blosc_cbuffer_sizes(encoded.data(), &nbytes, &cbytes, &bsize);
EXPECT_EQ(blocksize, bsize);
}
}
TEST(BloscTest, TooLong) {
blosc::Options options{"lz4", 5,
-1, 0,
1};
EXPECT_THAT(
blosc::Encode(std::string(BLOSC_MAX_BUFFERSIZE + 1, '\0'), options),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscTest, DecodeHeaderCorrupted) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
ASSERT_GE(encoded.size(), 1);
std::string corrupted = std::move(encoded);
corrupted[0] = 0;
EXPECT_THAT(blosc::Decode(corrupted),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressorTest, DecodeHeaderTruncated) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
ASSERT_GE(encoded.size(), 5);
EXPECT_THAT(blosc::Decode(std::string_view(encoded).substr(0, 5)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressorTest, DecodeDataTruncated) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
EXPECT_THAT(blosc::Decode(
std::string_view(encoded).substr(0, BLOSC_MIN_HEADER_LENGTH)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/blosc.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/blosc_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
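Illustrative round trip for the blosc wrapper exercised above (a sketch, not part of the record): the header path, the tensorstore::blosc namespace, and the Options field order {compressor, clevel, shuffle, blocksize, element_size} are assumptions inferred from the file paths and from the tests. shuffle == -1 requests automatic selection, which the shuffle test shows resolves to bit shuffle for 1-byte elements and byte shuffle otherwise.

#include <iostream>
#include <string>
#include <string_view>

#include "tensorstore/internal/compression/blosc.h"  // assumed header path

int main() {
  namespace blosc = tensorstore::blosc;  // assumed namespace
  const std::string_view input = "example payload example payload";
  blosc::Options options{/*compressor=*/"lz4", /*clevel=*/5, /*shuffle=*/-1,
                         /*blocksize=*/0, /*element_size=*/1};
  auto encoded = blosc::Encode(input, options);  // Result<std::string>
  if (!encoded.ok()) {
    std::cerr << encoded.status() << "\n";
    return 1;
  }
  auto decoded = blosc::Decode(*encoded);
  if (!decoded.ok()) {
    std::cerr << decoded.status() << "\n";
    return 1;
  }
  std::cout << (*decoded == input ? "round trip ok" : "mismatch") << "\n";
  return 0;
}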
6120788d-e5fe-472e-a809-2e2d418ab7b1 | cpp | google/tensorstore | gzip | tensorstore/driver/zarr3/codec/gzip.cc | tensorstore/driver/zarr3/codec/gzip_test.cc | #include "tensorstore/driver/zarr3/codec/gzip.h"
#include <stdint.h>
#include <memory>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
constexpr int kDefaultLevel = 6;
class GzipCodec : public ZarrBytesToBytesCodec {
public:
explicit GzipCodec(int level) : level_(level) {}
class State : public ZarrBytesToBytesCodec::PreparedState {
public:
Result<std::unique_ptr<riegeli::Writer>> GetEncodeWriter(
riegeli::Writer& encoded_writer) const final {
using Writer = riegeli::ZlibWriter<riegeli::Writer*>;
Writer::Options options;
options.set_compression_level(level_);
options.set_header(Writer::Header::kGzip);
return std::make_unique<Writer>(&encoded_writer, options);
}
Result<std::unique_ptr<riegeli::Reader>> GetDecodeReader(
riegeli::Reader& encoded_reader) const final {
using Reader = riegeli::ZlibReader<riegeli::Reader*>;
Reader::Options options;
options.set_header(Reader::Header::kGzip);
return std::make_unique<Reader>(&encoded_reader, options);
}
int level_;
};
Result<PreparedState::Ptr> Prepare(int64_t decoded_size) const final {
auto state = internal::MakeIntrusivePtr<State>();
state->level_ = level_;
return state;
}
private:
int level_;
};
}
absl::Status GzipCodecSpec::MergeFrom(const ZarrCodecSpec& other, bool strict) {
using Self = GzipCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(
MergeConstraint<&Options::level>("level", options, other_options));
return absl::OkStatus();
}
ZarrCodecSpec::Ptr GzipCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<GzipCodecSpec>(*this);
}
Result<ZarrBytesToBytesCodec::Ptr> GzipCodecSpec::Resolve(
BytesCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrBytesToBytesCodecSpec::Ptr* resolved_spec) const {
auto resolved_level = options.level.value_or(kDefaultLevel);
if (resolved_spec) {
resolved_spec->reset(
options.level ? this : new GzipCodecSpec(Options{resolved_level}));
}
return internal::MakeIntrusivePtr<GzipCodec>(resolved_level);
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = GzipCodecSpec;
using Options = Self::Options;
namespace jb = ::tensorstore::internal_json_binding;
RegisterCodec<Self>(
"gzip",
jb::Projection<&Self::options>(jb::Sequence(
jb::Member("level", jb::Projection<&Options::level>(
OptionalIfConstraintsBinder(
jb::Integer<int>(0, 9))))
)));
}
}
} | #include <gtest/gtest.h>
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
namespace {
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
TEST(GzipTest, EndianInferred) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "gzip"}, {"configuration", {{"level", 7}}}},
};
p.expected_spec = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 7}}}},
};
TestCodecSpecRoundTrip(p);
}
TEST(GzipTest, DefaultLevel) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "gzip"}},
};
p.expected_spec = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
TestCodecSpecRoundTrip(p);
}
TEST(GzipTest, RoundTrip) {
CodecRoundTripTestParams p;
p.spec = {"gzip"};
TestCodecRoundTrip(p);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/gzip.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/gzip_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
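A minimal sketch of building this codec chain from JSON, using only APIs visible above (ZarrCodecChainSpec::FromJson with default options, as in the tests). The binder restricts level to the range [0, 9], and an omitted level is filled in with kDefaultLevel (6) only when the spec is resolved, as the DefaultLevel test shows.

#include <nlohmann/json.hpp>

#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/util/result.h"

using ::tensorstore::Result;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;

// Chain that encodes little-endian bytes, then gzip-compresses them.
Result<ZarrCodecChainSpec> MakeGzipChain(int level) {
  ::nlohmann::json j = {
      {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
      {{"name", "gzip"}, {"configuration", {{"level", level}}}},
  };
  return ZarrCodecChainSpec::FromJson(j);
}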
93ddf3a3-54f7-4d52-9e60-c7a46d275e27 | cpp | google/tensorstore | codec_chain_spec | tensorstore/driver/zarr3/codec/codec_chain_spec.cc | tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc | #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <stddef.h>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/codec_spec.h"
#include "tensorstore/codec_spec_registry.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/driver/zarr3/codec/transpose.h"
#include "tensorstore/driver/zarr3/name_configuration_json_binder.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace jb = ::tensorstore::internal_json_binding;
namespace {
struct ZarrCodecJsonBinderImpl {
static absl::Status FromJson(const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j);
static absl::Status ToJson(const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj,
::nlohmann::json* j);
absl::Status operator()(std::true_type is_loading,
const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) const {
return FromJson(options, obj, j);
}
template <typename T>
absl::Status operator()(std::false_type is_loading,
const ZarrCodecSpec::ToJsonOptions& options, T* obj,
::nlohmann::json* j) const {
static_assert(
std::is_convertible_v<decltype(&**obj), const ZarrCodecSpec*>);
const ZarrCodecSpec* ptr = &**obj;
return ToJson(options, &ptr, j);
}
};
constexpr inline ZarrCodecJsonBinderImpl ZarrCodecJsonBinder{};
constexpr auto ZarrCodecJsonBinderImplBase =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
const auto& registry = GetCodecRegistry();
if constexpr (is_loading) {
if (options.constraints && j->is_string()) {
::nlohmann::json::object_t j_obj;
j_obj.emplace("name", std::move(*j));
*j = std::move(j_obj);
}
}
return jb::Object(NameConfigurationJsonBinder(
registry.KeyBinder(), registry.RegisteredObjectBinder()))
(is_loading, options, obj, j);
};
absl::Status ZarrCodecJsonBinderImpl::FromJson(
const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj,
::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::true_type{}, options, obj, j);
}
absl::Status ZarrCodecJsonBinderImpl::ToJson(
const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj, ::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::false_type{}, options, obj, j);
}
constexpr auto ZarrCodecChainSpecJsonBinderImpl = jb::Compose<
std::vector<ZarrCodecSpec::Ptr>>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
auto it = j->begin(), end = j->end();
for (; it != end && (*it)->kind() == ZarrCodecKind::kArrayToArray;
++it) {
obj->array_to_array.push_back(
internal::static_pointer_cast<const ZarrArrayToArrayCodecSpec>(
std::move(*it)));
}
if (it != end && (*it)->kind() == ZarrCodecKind::kArrayToBytes) {
obj->array_to_bytes =
internal::static_pointer_cast<const ZarrArrayToBytesCodecSpec>(
std::move(*it));
++it;
} else if (!options.constraints) {
return absl::InvalidArgumentError(
"array -> bytes codec must be specified");
}
for (; it != end; ++it) {
if ((*it)->kind() != ZarrCodecKind::kBytesToBytes) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected bytes -> bytes codec, but received: ",
jb::ToJson(*it, ZarrCodecJsonBinder).value().dump()));
}
obj->bytes_to_bytes.push_back(
internal::static_pointer_cast<const ZarrBytesToBytesCodecSpec>(
std::move(*it)));
}
} else {
j->insert(j->end(), obj->array_to_array.begin(),
obj->array_to_array.end());
if (obj->array_to_bytes) {
j->push_back(obj->array_to_bytes);
}
j->insert(j->end(), obj->bytes_to_bytes.begin(),
obj->bytes_to_bytes.end());
}
return absl::OkStatus();
},
jb::Array(ZarrCodecJsonBinder));
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec,
ZarrCodecChainSpecJsonBinderImpl);
namespace {
Result<ZarrArrayToBytesCodecSpec::Ptr> GetDefaultArrayToBytesCodecSpec(
const ArrayCodecResolveParameters& decoded) {
if (internal::IsTrivialDataType(decoded.dtype)) {
return DefaultBytesCodec();
}
return absl::InternalError(tensorstore::StrCat(
"No default codec defined for data type ", decoded.dtype));
}
absl::Status CodecResolveError(const ZarrCodecSpec& codec_spec,
std::string_view message,
const absl::Status& status) {
return tensorstore::MaybeAnnotateStatus(
status, tensorstore::StrCat(
"Error ", message, " through ",
jb::ToJson(&codec_spec, ZarrCodecJsonBinder).value().dump()));
}
}
size_t ZarrCodecChainSpec::sharding_height() const {
return array_to_bytes ? array_to_bytes->sharding_height() : 0;
}
absl::Status ZarrCodecChainSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
absl::FixedArray<ArrayDataTypeAndShapeInfo, 2> array_infos(
array_to_array.size());
const ArrayDataTypeAndShapeInfo* decoded_array_info = &array_info;
for (size_t i = 0; i < array_to_array.size(); ++i) {
const auto& codec_spec = *array_to_array[i];
auto& encoded_array_info = array_infos[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.PropagateDataTypeAndShape(*decoded_array_info,
encoded_array_info),
CodecResolveError(codec_spec, "propagating data type and shape", _));
decoded_array_info = &encoded_array_info;
}
std::optional<ArrayCodecChunkLayoutInfo> temp_info[2];
const ArrayCodecChunkLayoutInfo* encoded_info;
if (array_to_bytes) {
auto& decoded_info = array_infos.empty() ? decoded : temp_info[0].emplace();
TENSORSTORE_RETURN_IF_ERROR(
array_to_bytes->GetDecodedChunkLayout(
array_infos.empty() ? array_info : array_infos.back(),
decoded_info),
CodecResolveError(*array_to_bytes, "propagating chunk layout", _));
encoded_info = &decoded_info;
} else if (!array_to_array.empty()) {
encoded_info = &temp_info[0].emplace();
}
for (size_t i = array_to_array.size(); i--;) {
auto& decoded_info =
i == 0 ? decoded : temp_info[(array_to_array.size() - i) % 2].emplace();
const auto& codec_spec = *array_to_array[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.GetDecodedChunkLayout(
array_infos[i], *encoded_info,
i == 0 ? array_info : array_infos[i - 1], decoded_info),
CodecResolveError(codec_spec, "propagating chunk layout", _));
encoded_info = &decoded_info;
}
return absl::OkStatus();
}
Result<internal::IntrusivePtr<const ZarrCodecChain>>
ZarrCodecChainSpec::Resolve(ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrCodecChainSpec* resolved_spec) const {
auto chain = internal::MakeIntrusivePtr<ZarrCodecChain>();
std::optional<ArrayCodecResolveParameters> temp_array_resolve_params[2];
chain->array_to_array.reserve(array_to_array.size());
chain->bytes_to_bytes.reserve(bytes_to_bytes.size());
if (resolved_spec) {
assert(resolved_spec != this);
assert(resolved_spec->array_to_array.empty());
resolved_spec->array_to_array.reserve(array_to_array.size());
assert(!resolved_spec->array_to_bytes);
assert(resolved_spec->bytes_to_bytes.empty());
resolved_spec->bytes_to_bytes.reserve(bytes_to_bytes.size());
}
ArrayCodecResolveParameters* decoded_params = &decoded;
size_t temp_i = 0;
const auto resolve_array_to_array =
[&](const ZarrArrayToArrayCodecSpec& codec_spec) -> absl::Status {
auto& encoded_params = temp_array_resolve_params[(temp_i++) % 2].emplace();
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*decoded_params), encoded_params,
resolved_spec
? &resolved_spec->array_to_array.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
chain->array_to_array.push_back(std::move(codec));
decoded_params = &encoded_params;
return absl::OkStatus();
};
for (size_t i = 0; i < array_to_array.size(); ++i) {
TENSORSTORE_RETURN_IF_ERROR(resolve_array_to_array(*array_to_array[i]));
}
std::optional<BytesCodecResolveParameters> temp_bytes_resolve_params[2];
auto* bytes_decoded_params = &temp_bytes_resolve_params[0].emplace();
ZarrArrayToBytesCodecSpec::Ptr temp_array_to_bytes_codec;
auto* array_to_bytes_codec_ptr = this->array_to_bytes.get();
if (!array_to_bytes_codec_ptr) {
TENSORSTORE_ASSIGN_OR_RETURN(
temp_array_to_bytes_codec,
GetDefaultArrayToBytesCodecSpec(*decoded_params));
array_to_bytes_codec_ptr = temp_array_to_bytes_codec.get();
}
  // If the array -> bytes codec prefers a different inner order than the one
  // requested, insert a transpose codec so the preferred order is used.
  DimensionIndex preferred_order[kMaxRank];
if (DimensionIndex rank = decoded_params->rank;
decoded_params->inner_order &&
!array_to_bytes_codec_ptr->SupportsInnerOrder(
*decoded_params, span<DimensionIndex>(&preferred_order[0], rank))) {
const auto& existing_inner_order = *decoded_params->inner_order;
std::vector<DimensionIndex> new_order(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
new_order[preferred_order[i]] = existing_inner_order[i];
}
TENSORSTORE_RETURN_IF_ERROR(
resolve_array_to_array(*internal::MakeIntrusivePtr<TransposeCodecSpec>(
TransposeCodecSpec::Options{std::move(new_order)})));
}
TENSORSTORE_ASSIGN_OR_RETURN(
chain->array_to_bytes,
array_to_bytes_codec_ptr->Resolve(
std::move(*decoded_params), *bytes_decoded_params,
resolved_spec ? &resolved_spec->array_to_bytes : nullptr),
CodecResolveError(*array_to_bytes, "resolving codec spec", _));
if (chain->array_to_bytes->is_sharding_codec() && !bytes_to_bytes.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Sharding codec %s is not compatible with subsequent bytes -> "
"bytes codecs %s that apply to the entire shard. Instead, "
"bytes -> bytes codecs may be specified as inner codecs that apply "
"to each sub-chunk individually.",
jb::ToJson(array_to_bytes_codec_ptr, ZarrCodecJsonBinder)
.value()
.dump(),
jb::ToJson(bytes_to_bytes, jb::Array(ZarrCodecJsonBinder))
.value()
.dump()));
}
for (size_t i = 0; i < bytes_to_bytes.size(); ++i) {
auto& encoded_params = temp_bytes_resolve_params[(i + 1) % 2].emplace();
const auto& codec_spec = *bytes_to_bytes[i];
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*bytes_decoded_params), encoded_params,
resolved_spec
? &resolved_spec->bytes_to_bytes.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
bytes_decoded_params = &encoded_params;
chain->bytes_to_bytes.push_back(std::move(codec));
}
encoded = std::move(*bytes_decoded_params);
return chain;
}
namespace {
template <typename T, typename Binder>
std::string MergeErrorMessage(const T& a, const T& b, const Binder& binder) {
return absl::StrFormat("Cannot merge zarr codec constraints %s and %s",
jb::ToJson(a, binder).value().dump(),
jb::ToJson(b, binder).value().dump());
}
std::string MergeErrorMessage(const ZarrCodecSpec& a, const ZarrCodecSpec& b) {
return MergeErrorMessage(ZarrCodecSpec::Ptr(&a), ZarrCodecSpec::Ptr(&b),
ZarrCodecJsonBinder);
}
template <typename T>
void EnsureMutableCodecSpec(internal::IntrusivePtr<const T>& ptr) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
assert(ptr);
if (ptr->use_count() > 1) {
ptr = internal::static_pointer_cast<const T>(ptr->Clone());
}
}
absl::Status MergeZarrCodecSpecs(ZarrCodecSpec::Ptr& target,
const ZarrCodecSpec* source, bool strict) {
if (!source) {
return absl::OkStatus();
}
if (!target) {
target.reset(source);
return absl::OkStatus();
}
absl::Status status;
const auto& target_ref = *target;
const auto& source_ref = *source;
if (typeid(target_ref) != typeid(source_ref)) {
status = absl::FailedPreconditionError("");
} else {
EnsureMutableCodecSpec(target);
status = const_cast<ZarrCodecSpec&>(*target).MergeFrom(*source, strict);
}
if (status.ok()) return absl::OkStatus();
return tensorstore::MaybeAnnotateStatus(status,
MergeErrorMessage(*target, *source));
}
template <typename T>
absl::Status MergeZarrCodecSpecs(typename T::Ptr& target, const T* source,
bool strict) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
ZarrCodecSpec::Ptr target_base = std::move(target);
auto status = MergeZarrCodecSpecs(target_base, source, strict);
target = internal::static_pointer_cast<const T>(std::move(target_base));
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::OkStatus();
}
template <typename T>
absl::Status MergeZarrCodecSpecs(std::vector<T>& targets,
const std::vector<T>& sources, bool strict) {
constexpr bool kIsArrayToArray =
std::is_same_v<ZarrArrayToArrayCodecSpec::Ptr, T>;
size_t merge_count = targets.size();
bool size_mismatch = targets.size() != sources.size();
if constexpr (kIsArrayToArray) {
if (!strict) {
if (sources.size() == targets.size() + 1 &&
typeid(*sources.back()) == typeid(TransposeCodecSpec)) {
targets.push_back(sources.back());
size_mismatch = false;
} else if (sources.size() + 1 == targets.size() &&
typeid(*targets.back()) == typeid(TransposeCodecSpec)) {
--merge_count;
size_mismatch = false;
}
}
}
if (size_mismatch) {
return tensorstore::MaybeAnnotateStatus(
absl::FailedPreconditionError(absl::StrFormat(
"Mismatch in number of %s codecs (%d vs %d)",
kIsArrayToArray ? "array -> array" : "bytes -> bytes",
targets.size(), sources.size())),
MergeErrorMessage(targets, sources, jb::Array(ZarrCodecJsonBinder)));
}
for (size_t i = 0; i < merge_count; ++i) {
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(targets[i], sources[i].get(), strict));
}
return absl::OkStatus();
}
}
absl::Status ZarrCodecChainSpec::MergeFrom(const ZarrCodecChainSpec& other,
bool strict) {
if (!strict) {
size_t self_sharding_height = sharding_height();
size_t other_sharding_height = other.sharding_height();
if (self_sharding_height > other_sharding_height &&
array_to_array.empty() && bytes_to_bytes.empty()) {
EnsureMutableCodecSpec(array_to_bytes);
return static_cast<ZarrShardingCodecSpec&>(
const_cast<ZarrArrayToBytesCodecSpec&>(*array_to_bytes))
.MergeSubChunkCodecsFrom(other, strict);
}
if (self_sharding_height < other_sharding_height &&
other.array_to_array.empty() && other.bytes_to_bytes.empty()) {
auto new_array_to_bytes_codec =
internal::static_pointer_cast<const ZarrShardingCodecSpec>(
other.array_to_bytes->Clone());
TENSORSTORE_RETURN_IF_ERROR(
const_cast<ZarrShardingCodecSpec&>(*new_array_to_bytes_codec)
.MergeSubChunkCodecsFrom(*this, strict));
array_to_array.clear();
bytes_to_bytes.clear();
array_to_bytes = std::move(new_array_to_bytes_codec);
return absl::OkStatus();
}
}
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_array, other.array_to_array, strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_bytes, other.array_to_bytes.get(), strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(bytes_to_bytes, other.bytes_to_bytes, strict));
return absl::OkStatus();
}
absl::Status MergeZarrCodecSpecs(
std::optional<ZarrCodecChainSpec>& target,
const std::optional<ZarrCodecChainSpec>& source, bool strict) {
if (!target) {
if (source) {
target = *source;
}
return absl::OkStatus();
}
if (!source) {
return absl::OkStatus();
}
return target->MergeFrom(*source, strict);
}
bool ZarrShardingCodecSpec::SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const {
return true;
}
size_t ZarrShardingCodecSpec::sharding_height() const {
auto* sub_chunk_codecs = this->GetSubChunkCodecs();
return 1 + (sub_chunk_codecs ? sub_chunk_codecs->sharding_height() : 0);
}
CodecSpec TensorStoreCodecSpec::Clone() const {
return internal::CodecDriverSpec::Make<TensorStoreCodecSpec>(*this);
}
absl::Status TensorStoreCodecSpec::DoMergeFrom(
const internal::CodecDriverSpec& other_base) {
if (typeid(other_base) != typeid(TensorStoreCodecSpec)) {
return absl::InvalidArgumentError("");
}
auto& other = static_cast<const TensorStoreCodecSpec&>(other_base);
return MergeZarrCodecSpecs(codecs, other.codecs, false);
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
TensorStoreCodecSpec,
jb::Sequence(
jb::Member("codecs",
jb::Projection<&TensorStoreCodecSpec::codecs>(jb::Optional(
ZarrCodecChainJsonBinder<true>)))
))
namespace {
const internal::CodecSpecRegistration<TensorStoreCodecSpec>
encoding_registration;
}
}
namespace internal {
void CacheKeyEncoder<internal_zarr3::ZarrCodecChainSpec>::Encode(
std::string* out, const internal_zarr3::ZarrCodecChainSpec& value) {
internal::EncodeCacheKey(out, value.ToJson().value().dump());
}
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_zarr3::ZarrCodecChainSpec,
tensorstore::serialization::JsonBindableSerializer<
tensorstore::internal_zarr3::ZarrCodecChainSpec>()) | #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::CodecSpec;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecMerge;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(CodecMergeTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto a,
CodecSpec::FromJson({
{"driver", "zarr3"},
{"codecs",
{{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs",
{GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto b, CodecSpec::FromJson(
{{"driver", "zarr3"},
{"codecs",
{{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}}}));
EXPECT_THAT(a.MergeFrom(b),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Incompatible \"level\": 6 vs 5"));
}
TEST(CodecChainSpecTest, MissingArrayToBytes) {
EXPECT_THAT(ZarrCodecChainSpec::FromJson(::nlohmann::json::array_t()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"array -> bytes codec must be specified"));
}
TEST(CodecChainSpecTest, MergeCodecNameMismatch) {
EXPECT_THAT(
TestCodecMerge({"gzip"}, {"crc32c"}, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition, "Cannot merge .*"));
}
TEST(CodecChainSpecTest, MergeArrayToBytes) {
EXPECT_THAT(
TestCodecMerge(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}},
::nlohmann::json::array_t(), true),
::testing::Optional(MatchesJson(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}})));
}
TEST(CodecChainSpecTest, ExtraTranspose) {
::nlohmann::json a = {
{{"name", "transpose"}, {"configuration", {{"order", {0, 2, 1}}}}},
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
::nlohmann::json b = {
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
}
TEST(CodecChainSpecTest, ExtraSharding) {
::nlohmann::json a = {{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs", {GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}};
::nlohmann::json b = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
::nlohmann::json c = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(TestCodecMerge(a, c, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
EXPECT_THAT(TestCodecMerge(a, c, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Cannot merge zarr codec constraints .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
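A sketch of the merge semantics exercised by CodecMergeTest (assuming the usual tensorstore headers for the TENSORSTORE_ASSIGN_OR_RETURN macro): merging succeeds when one side imposes no conflicting constraints, and fails with kFailedPrecondition when, as in the test above, two hard gzip levels disagree.

#include <nlohmann/json.hpp>

#include "absl/status/status.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"

using ::tensorstore::CodecSpec;

absl::Status MergeGzipLevel() {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto constrained,
      CodecSpec::FromJson(
          {{"driver", "zarr3"},
           {"codecs",
            {{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}}}));
  TENSORSTORE_ASSIGN_OR_RETURN(auto unconstrained,
                               CodecSpec::FromJson({{"driver", "zarr3"}}));
  // `unconstrained` pins no codecs, so absorbing the gzip level succeeds.
  return unconstrained.MergeFrom(constrained);
}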
0ecfd9b5-6bc6-4d8f-80b0-b0f99116647f | cpp | google/tensorstore | sharding_indexed | tensorstore/driver/zarr3/codec/sharding_indexed.cc | tensorstore/driver/zarr3/codec/sharding_indexed_test.cc | #include "tensorstore/driver/zarr3/codec/sharding_indexed.h"
#include <stdint.h>
#include <algorithm>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/crc32c.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
absl::Status SubChunkRankMismatch(span<const Index> sub_chunk_shape,
DimensionIndex outer_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" is not compatible with array of rank ", outer_rank));
}
absl::Status SubChunkShapeMismatch(span<const Index> sub_chunk_shape,
span<const Index> chunk_shape) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" does not evenly divide chunk shape of ", chunk_shape));
}
namespace {
class ShardingIndexedCodec : public ZarrShardingCodec {
public:
explicit ShardingIndexedCodec(
internal::ChunkGridSpecification&& sub_chunk_grid)
: sub_chunk_grid_(std::move(sub_chunk_grid)) {}
class State : public ZarrShardingCodec::PreparedState,
public internal::LexicographicalGridIndexKeyParser {
public:
absl::Status EncodeArray(SharedArrayView<const void> decoded,
riegeli::Writer& writer) const final {
return absl::InternalError("");
}
Result<SharedArray<const void>> DecodeArray(
span<const Index> decoded_shape, riegeli::Reader& reader) const final {
return absl::InternalError("");
}
kvstore::DriverPtr GetSubChunkKvstore(
kvstore::DriverPtr parent, std::string parent_key,
const Executor& executor,
internal::CachePool::WeakPtr cache_pool) const override {
zarr3_sharding_indexed::ShardedKeyValueStoreParameters params;
params.base_kvstore = std::move(parent);
params.base_kvstore_path = std::move(parent_key);
params.executor = executor;
params.cache_pool = std::move(cache_pool);
params.index_params = shard_index_params_;
return zarr3_sharding_indexed::GetShardedKeyValueStore(std::move(params));
}
const LexicographicalGridIndexKeyParser& GetSubChunkStorageKeyParser()
const final {
return *this;
}
std::string FormatKey(span<const Index> grid_indices) const final {
return zarr3_sharding_indexed::IndicesToKey(grid_indices);
}
bool ParseKey(std::string_view key, span<Index> grid_indices) const final {
return zarr3_sharding_indexed::KeyToIndices(key, grid_indices);
}
Index MinGridIndexForLexicographicalOrder(
DimensionIndex dim, IndexInterval grid_interval) const final {
return 0;
}
internal::IntrusivePtr<const ZarrShardingCodec> parent_codec_;
std::vector<Index> sub_chunk_grid_shape_;
ZarrCodecChain::PreparedState::Ptr codec_state_;
zarr3_sharding_indexed::ShardIndexParameters shard_index_params_;
};
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final {
span<const Index> sub_chunk_shape = sub_chunk_grid_.components[0].shape();
if (decoded_shape.size() != sub_chunk_shape.size()) {
return SubChunkRankMismatch(sub_chunk_shape, decoded_shape.size());
}
auto state = internal::MakeIntrusivePtr<State>();
state->parent_codec_.reset(this);
auto& sub_chunk_grid_shape = state->sub_chunk_grid_shape_;
sub_chunk_grid_shape.resize(decoded_shape.size());
    // The sub-chunk shape must evenly divide the full chunk (shard) shape;
    // the quotient gives the sub-chunk grid shape.
    for (DimensionIndex i = 0; i < sub_chunk_shape.size(); ++i) {
if (decoded_shape[i] % sub_chunk_shape[i] != 0) {
return SubChunkShapeMismatch(sub_chunk_shape, decoded_shape);
}
const int64_t grid_size = decoded_shape[i] / sub_chunk_shape[i];
sub_chunk_grid_shape[i] = grid_size;
}
TENSORSTORE_ASSIGN_OR_RETURN(
state->codec_state_, sub_chunk_codec_chain_->Prepare(sub_chunk_shape));
state->sub_chunk_grid = &sub_chunk_grid_;
state->sub_chunk_codec_chain = sub_chunk_codec_chain_.get();
state->sub_chunk_codec_state = state->codec_state_.get();
state->shard_index_params_.index_location = index_location_;
TENSORSTORE_RETURN_IF_ERROR(state->shard_index_params_.Initialize(
*index_codec_chain_, sub_chunk_grid_shape));
return {std::in_place, std::move(state)};
}
internal::ChunkGridSpecification sub_chunk_grid_;
ZarrCodecChain::Ptr sub_chunk_codec_chain_;
ZarrCodecChain::Ptr index_codec_chain_;
ShardIndexLocation index_location_;
};
}
absl::Status ShardingIndexedCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = ShardingIndexedCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::sub_chunk_shape>(
"chunk_shape", options, other_options));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(options.index_codecs,
other_options.index_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible \"index_codecs\""));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(
options.sub_chunk_codecs, other_options.sub_chunk_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible sub-chunk \"codecs\""));
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::index_location>(
"index_location", options, other_options));
return absl::OkStatus();
}
absl::Status ShardingIndexedCodecSpec::MergeSubChunkCodecsFrom(
const ZarrCodecChainSpec& other, bool strict) {
if (!options.sub_chunk_codecs) {
options.sub_chunk_codecs = other;
return absl::OkStatus();
}
return options.sub_chunk_codecs->MergeFrom(other, strict);
}
ZarrCodecSpec::Ptr ShardingIndexedCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<ShardingIndexedCodecSpec>(*this);
}
const ZarrCodecChainSpec* ShardingIndexedCodecSpec::GetSubChunkCodecs() const {
return options.sub_chunk_codecs ? &*options.sub_chunk_codecs : nullptr;
}
absl::Status ShardingIndexedCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
ArrayDataTypeAndShapeInfo sub_chunk_info;
if (options.sub_chunk_shape &&
!RankConstraint::Implies(options.sub_chunk_shape->size(),
array_info.rank)) {
return SubChunkRankMismatch(*options.sub_chunk_shape, array_info.rank);
}
sub_chunk_info.dtype = array_info.dtype;
sub_chunk_info.rank = array_info.rank;
if (options.sub_chunk_shape) {
std::copy(options.sub_chunk_shape->begin(), options.sub_chunk_shape->end(),
sub_chunk_info.shape.emplace().begin());
}
if (options.sub_chunk_codecs) {
TENSORSTORE_RETURN_IF_ERROR(options.sub_chunk_codecs->GetDecodedChunkLayout(
sub_chunk_info, decoded));
}
return absl::OkStatus();
}
namespace {
ZarrCodecChainSpec DefaultIndexCodecChainSpec() {
ZarrCodecChainSpec codecs;
codecs.array_to_bytes = DefaultBytesCodec();
codecs.bytes_to_bytes.push_back(
internal::MakeIntrusivePtr<const Crc32cCodecSpec>());
return codecs;
}
}
Result<ZarrArrayToBytesCodec::Ptr> ShardingIndexedCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
ShardingIndexedCodecSpec::Options* resolved_options = nullptr;
if (resolved_spec) {
auto* resolved_spec_ptr = new ShardingIndexedCodecSpec;
resolved_options = &resolved_spec_ptr->options;
resolved_spec->reset(resolved_spec_ptr);
}
span<const Index> sub_chunk_shape;
if (options.sub_chunk_shape) {
sub_chunk_shape = *options.sub_chunk_shape;
} else if (decoded.read_chunk_shape) {
sub_chunk_shape =
span<const Index>(decoded.read_chunk_shape->data(), decoded.rank);
} else {
return absl::InvalidArgumentError("\"chunk_shape\" must be specified");
}
if (sub_chunk_shape.size() != decoded.rank) {
return SubChunkRankMismatch(sub_chunk_shape, decoded.rank);
}
internal::ChunkGridSpecification::ComponentList components;
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_fill_value,
BroadcastArray(decoded.fill_value, BoxView<>(sub_chunk_shape.size())));
components.emplace_back(
internal::AsyncWriteArray::Spec{std::move(broadcast_fill_value),
Box<>(sub_chunk_shape.size())},
std::vector<Index>(sub_chunk_shape.begin(), sub_chunk_shape.end()));
components.back().array_spec.fill_value_comparison_kind =
EqualityComparisonKind::identical;
auto codec = internal::MakeIntrusivePtr<ShardingIndexedCodec>(
internal::ChunkGridSpecification(std::move(components)));
codec->index_location_ =
options.index_location.value_or(ShardIndexLocation::kEnd);
if (resolved_options) {
resolved_options->sub_chunk_shape = codec->sub_chunk_grid_.chunk_shape;
resolved_options->index_location = codec->index_location_;
}
auto set_up_codecs =
[&](const ZarrCodecChainSpec& sub_chunk_codecs) -> absl::Status {
ArrayCodecResolveParameters sub_chunk_decoded;
sub_chunk_decoded.dtype = decoded.dtype;
sub_chunk_decoded.rank = decoded.rank;
sub_chunk_decoded.fill_value = std::move(decoded.fill_value);
if (decoded.read_chunk_shape) {
std::copy_n(decoded.read_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.read_chunk_shape.emplace().begin());
}
if (decoded.codec_chunk_shape) {
std::copy_n(decoded.codec_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.codec_chunk_shape.emplace().begin());
}
if (decoded.inner_order) {
std::copy_n(decoded.inner_order->begin(), decoded.rank,
sub_chunk_decoded.inner_order.emplace().begin());
}
TENSORSTORE_ASSIGN_OR_RETURN(
codec->sub_chunk_codec_chain_,
sub_chunk_codecs.Resolve(
std::move(sub_chunk_decoded), encoded,
resolved_options ? &resolved_options->sub_chunk_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_codecs(options.sub_chunk_codecs ? *options.sub_chunk_codecs
: ZarrCodecChainSpec{}),
tensorstore::MaybeAnnotateStatus(_, "Error resolving sub-chunk codecs"));
auto set_up_index_codecs =
[&](const ZarrCodecChainSpec& index_codecs) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
codec->index_codec_chain_,
zarr3_sharding_indexed::InitializeIndexCodecChain(
index_codecs, sub_chunk_shape.size(),
resolved_options ? &resolved_options->index_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_index_codecs(options.index_codecs ? *options.index_codecs
: DefaultIndexCodecChainSpec()),
tensorstore::MaybeAnnotateStatus(_, "Error resolving index_codecs"));
return {std::in_place, std::move(codec)};
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = ShardingIndexedCodecSpec;
using Options = Self::Options;
namespace jb = ::tensorstore::internal_json_binding;
RegisterCodec<Self>(
"sharding_indexed",
jb::Projection<&Self::options>(jb::Sequence(
jb::Member("chunk_shape", jb::Projection<&Options::sub_chunk_shape>(
OptionalIfConstraintsBinder(
jb::Array(jb::Integer<Index>(1))))),
jb::Member("index_codecs", jb::Projection<&Options::index_codecs>(
OptionalIfConstraintsBinder())),
jb::Member("codecs", jb::Projection<&Options::sub_chunk_codecs>(
OptionalIfConstraintsBinder())),
jb::Member(
"index_location",
jb::Projection<&Options::index_location>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (!is_loading) {
if (!options.constraints &&
*obj == ShardIndexLocation::kEnd) {
return absl::OkStatus();
}
}
return jb::Validate([](const auto& options, auto* obj) {
if (!options.constraints) {
if (!obj->has_value()) *obj = ShardIndexLocation::kEnd;
}
return absl::OkStatus();
})(is_loading, options, obj, j);
}))
)));
}
}
} | #include <stdint.h>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::BytesCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::TestCodecSpecResolve;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(ShardingIndexedTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec, ZarrCodecChainSpec::FromJson(
{{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}}));
}
TEST(ShardingIndexedTest, InvalidBytesToBytes) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, ZarrCodecChainSpec::FromJson({
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
{
{"name", "gzip"},
{"configuration", {{"level", 5}}},
},
}));
ArrayCodecResolveParameters decoded_params;
decoded_params.dtype = tensorstore::dtype_v<uint32_t>;
decoded_params.rank = 2;
decoded_params.fill_value = tensorstore::MakeScalarArray<uint32_t>(42);
BytesCodecResolveParameters encoded_params;
EXPECT_THAT(
spec.Resolve(std::move(decoded_params), encoded_params, nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Sharding codec .* is not compatible with subsequent bytes "
"-> bytes .*"));
}
TEST(ShardingIndexedTest, DefaultIndexLocation) {
CodecSpecRoundTripTestParams p;
p.resolve_params.rank = 2;
p.orig_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_location", "end"},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = true;
TestCodecSpecRoundTrip(p);
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = false;
TestCodecSpecRoundTrip(p);
}
TEST(ShardingIndexedTest, IndexLocationEndNotStored) {
ArrayCodecResolveParameters p;
p.dtype = tensorstore::dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(TestCodecSpecResolve(
::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
{"index_location", "end"},
}}}},
p,
false),
::testing::Optional(MatchesJson(::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/sharding_indexed.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/sharding_indexed_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
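A sketch grounded in the tests above: parse a sharding_indexed chain, resolve it against concrete array parameters, and prepare it for a shard shape. The headers and the Prepare call mirror usage visible in the record; the shard shape here is a hypothetical value chosen to be divisible by the {2, 3} sub-chunk shape.

#include <stdint.h>

#include <utility>

#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"

using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::BytesCodecResolveParameters;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;

absl::Status ResolveShardingChain() {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto spec,
      ZarrCodecChainSpec::FromJson(
          {{{"name", "sharding_indexed"},
            {"configuration",
             {
                 {"chunk_shape", {2, 3}},
                 {"codecs",
                  {{{"name", "bytes"},
                    {"configuration", {{"endian", "little"}}}}}},
                 {"index_codecs",
                  {{{"name", "bytes"},
                    {"configuration", {{"endian", "little"}}}},
                   {{"name", "crc32c"}}}},
             }}}}));
  ArrayCodecResolveParameters decoded;
  decoded.dtype = tensorstore::dtype_v<uint32_t>;
  decoded.rank = 2;
  decoded.fill_value = tensorstore::MakeScalarArray<uint32_t>(42);
  BytesCodecResolveParameters encoded;
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto chain, spec.Resolve(std::move(decoded), encoded, nullptr));
  // Shard shape must be evenly divisible by the {2, 3} sub-chunk shape.
  const tensorstore::Index shard_shape[] = {4, 6};
  TENSORSTORE_ASSIGN_OR_RETURN(auto state, chain->Prepare(shard_shape));
  (void)state;
  return absl::OkStatus();
}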
324f4e92-107f-46ec-881e-dd70ba2de645 | cpp | google/tensorstore | grid_occupancy_map | tensorstore/driver/downsample/grid_occupancy_map.cc | tensorstore/driver/downsample/grid_occupancy_map_test.cc | #include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
GridOccupancyMap::GridOccupancyMap(GridOccupancyTracker&& tracker,
BoxView<> domain)
: partition_points(domain.rank()) {
const DimensionIndex rank = domain.rank();
span<Index> occupied_chunks = tracker.occupied_chunks;
{
    // Maps each partition point along the current dimension to its position
    // in the sorted `partition_points[dim]` list (filled in after sorting).
    absl::flat_hash_map<Index, Index> partition_map;
for (DimensionIndex dim = 0; dim < rank; ++dim) {
partition_map.clear();
IndexInterval bounds = domain[dim];
partition_map.emplace(bounds.inclusive_min(), 0);
partition_map.emplace(bounds.exclusive_max(), 0);
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index begin = occupied_chunks[i];
Index end = begin + occupied_chunks[i + rank];
partition_map.emplace(begin, 0);
partition_map.emplace(end, 0);
}
auto& dim_partition_points = partition_points[dim];
dim_partition_points.reserve(partition_map.size());
for (const auto& p : partition_map) {
dim_partition_points.push_back(p.first);
}
std::sort(dim_partition_points.begin(), dim_partition_points.end());
for (size_t i = 0, size = dim_partition_points.size(); i < size; ++i) {
partition_map.at(dim_partition_points[i]) = i;
}
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index& begin = occupied_chunks[i];
Index& end = occupied_chunks[i + rank];
end = partition_map.at(begin + end);
begin = partition_map.at(begin);
}
}
}
Index grid_cell[kMaxRank];
span<Index> grid_cell_span(&grid_cell[0], rank);
{
for (DimensionIndex dim = 0; dim < rank; ++dim) {
grid_cell[dim] = partition_points[dim].size() - 1;
}
occupied_chunk_mask =
AllocateArray<bool>(grid_cell_span, c_order, value_init);
}
  // Mark as occupied every grid cell covered by each tracked chunk.
  for (ptrdiff_t i = 0; i < occupied_chunks.size(); i += 2 * rank) {
std::copy_n(&occupied_chunks[i], rank, &grid_cell[0]);
do {
occupied_chunk_mask(grid_cell_span) = true;
} while (internal::AdvanceIndices(rank, &grid_cell[0], &occupied_chunks[i],
&occupied_chunks[i + rank]));
}
}
bool GridOccupancyMap::GetGridCellDomain(
span<const Index> grid_cell, MutableBoxView<> grid_cell_domain) const {
assert(grid_cell.size() == grid_cell_domain.rank());
assert(grid_cell.size() == rank());
if (occupied_chunk_mask(grid_cell)) return false;
for (DimensionIndex dim = 0; dim < grid_cell.size(); ++dim) {
const Index partition_index = grid_cell[dim];
grid_cell_domain[dim] = IndexInterval::UncheckedHalfOpen(
partition_points[dim][partition_index],
partition_points[dim][partition_index + 1]);
}
return true;
}
void GridOccupancyMap::InitializeCellIterator(span<Index> grid_cell) const {
std::fill(grid_cell.begin(), grid_cell.end(), 0);
}
bool GridOccupancyMap::AdvanceCellIterator(span<Index> grid_cell) const {
assert(grid_cell.size() == occupied_chunk_mask.rank());
return internal::AdvanceIndices(grid_cell.size(), grid_cell.data(),
occupied_chunk_mask.shape().data());
}
}
} | #include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_downsample::GridOccupancyMap;
using ::tensorstore::internal_downsample::GridOccupancyTracker;
std::vector<Box<>> GetUnoccupiedBoxes(const GridOccupancyMap& map) {
std::vector<Box<>> boxes;
std::vector<Index> grid_cell(map.rank());
map.InitializeCellIterator(grid_cell);
Box<> box(map.rank());
do {
if (map.GetGridCellDomain(grid_cell, box)) {
boxes.push_back(box);
}
} while (map.AdvanceCellIterator(grid_cell));
return boxes;
}
TEST(GridOccupancyMapTest, Rank1) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<1>({1}, {3}));
tracker.MarkOccupied(BoxView<1>({5}, {4}));
GridOccupancyMap map(std::move(tracker), BoxView<1>({-1}, {11}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(-1, 1, 4, 5, 9, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({0, 1, 0, 1, 0}));
EXPECT_THAT(GetUnoccupiedBoxes(map),
::testing::ElementsAre(Box<>({-1}, {2}), Box<>({4}, {1}),
Box<>({9}, {1})));
}
TEST(GridOccupancyMapTest, Rank2) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<2>({0, 0}, {3, 2}));
tracker.MarkOccupied(BoxView<2>({3, 3}, {1, 3}));
tracker.MarkOccupied(BoxView<2>({0, 5}, {2, 3}));
GridOccupancyMap map(std::move(tracker), BoxView<2>({4, 10}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(0, 2, 3, 4),
::testing::ElementsAre(0, 2, 3, 5, 6, 8, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({
{1, 0, 0, 1, 1, 0},
{1, 0, 0, 0, 0, 0},
{0, 0, 1, 1, 0, 0},
}));
EXPECT_THAT(
GetUnoccupiedBoxes(map),
::testing::ElementsAre(
Box<>({0, 2}, {2, 1}), Box<>({0, 3}, {2, 2}), Box<>({0, 8}, {2, 2}),
Box<>({2, 2}, {1, 1}), Box<>({2, 3}, {1, 2}), Box<>({2, 5}, {1, 1}),
Box<>({2, 6}, {1, 2}), Box<>({2, 8}, {1, 2}), Box<>({3, 0}, {1, 2}),
Box<>({3, 2}, {1, 1}), Box<>({3, 6}, {1, 2}), Box<>({3, 8}, {1, 2})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/grid_occupancy_map.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/grid_occupancy_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
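Standalone sketch mirroring the GetUnoccupiedBoxes helper above: record occupied regions with GridOccupancyTracker, then iterate the partitioned grid to recover the unoccupied sub-boxes. By construction of the partition points, each grid cell is either fully occupied or fully unoccupied.

#include <iostream>
#include <utility>
#include <vector>

#include "tensorstore/box.h"
#include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include "tensorstore/index.h"

int main() {
  using ::tensorstore::Box;
  using ::tensorstore::BoxView;
  using ::tensorstore::Index;
  using ::tensorstore::internal_downsample::GridOccupancyMap;
  using ::tensorstore::internal_downsample::GridOccupancyTracker;

  GridOccupancyTracker tracker;
  tracker.MarkOccupied(BoxView<1>({1}, {3}));
  tracker.MarkOccupied(BoxView<1>({5}, {4}));
  GridOccupancyMap map(std::move(tracker), BoxView<1>({-1}, {11}));

  std::vector<Index> cell(map.rank());
  map.InitializeCellIterator(cell);
  Box<> domain(map.rank());
  do {
    if (map.GetGridCellDomain(cell, domain)) {
      std::cout << "unoccupied: " << domain << "\n";
    }
  } while (map.AdvanceCellIterator(cell));
  return 0;
}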
9d9cd715-605d-40e6-ac89-395c9fa1afa1 | cpp | google/tensorstore | downsample_util | tensorstore/driver/downsample/downsample_util.cc | tensorstore/driver/downsample/downsample_util_test.cc | #include "tensorstore/driver/downsample/downsample_util.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_downsample {
std::ostream& operator<<(std::ostream& os,
const PropagatedIndexTransformDownsampling& x) {
return os << "transform=" << x.transform << "\ninput_downsample_factors="
<< absl::StrJoin(x.input_downsample_factors, ",");
}
namespace {
DimensionIndex ComputeAdditionalInputDimensionsNeeded(
IndexTransformView<> downsampled_transform,
span<const Index> output_downsample_factors,
span<DimensionIndex> input_dimension_ref_counts, bool is_domain_empty) {
using internal_index_space::TransformAccess;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
assert(input_dimension_ref_counts.size() ==
downsampled_transform.input_rank());
assert(output_downsample_factors.size() == output_rank);
DimensionIndex additional_input_dims = 0;
auto old_transform_rep = TransformAccess::rep(downsampled_transform);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
assert(output_downsample_factors[output_dim] > 0);
if (output_downsample_factors[output_dim] == 1) {
continue;
}
const auto& output_map = old_transform_rep->output_index_maps()[output_dim];
switch (output_map.method()) {
case OutputIndexMethod::constant:
if (!is_domain_empty) {
++additional_input_dims;
}
break;
case OutputIndexMethod::single_input_dimension:
if ((std::abs(output_map.stride()) != 1 ||
input_dimension_ref_counts[output_map.input_dimension()] != 1) &&
!downsampled_transform.input_domain()
.box()[output_map.input_dimension()]
.empty()) {
++additional_input_dims;
}
break;
case OutputIndexMethod::array: {
++additional_input_dims;
break;
}
}
}
return additional_input_dims;
}
absl::Status ExtendOutputIndexMap(
const internal_index_space::OutputIndexMap& output_map,
internal_index_space::OutputIndexMap& new_output_map,
DimensionIndex input_rank, DimensionIndex new_input_rank) {
new_output_map.offset() = output_map.offset();
new_output_map.stride() = output_map.stride();
switch (output_map.method()) {
case OutputIndexMethod::constant:
new_output_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension:
new_output_map.SetSingleInputDimension(output_map.input_dimension());
break;
case OutputIndexMethod::array: {
const auto& index_array_data = output_map.index_array_data();
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_rank);
new_index_array_data.element_pointer = index_array_data.element_pointer;
new_index_array_data.index_range = index_array_data.index_range;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_rank - input_rank, Index(0));
break;
}
}
return absl::OkStatus();
}
absl::Status PropagateUnitStrideSingleInputDimensionMapDownsampling(
Index original_offset, Index original_stride, IndexInterval input_interval,
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
assert(original_stride == 1 || original_stride == -1);
if (internal::MulOverflow(original_offset, output_downsample_factor,
&new_output_map.offset())) {
return absl::OutOfRangeError(
tensorstore::StrCat("Integer overflow computing output offset ",
original_offset, " * ", output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto bounds_interval,
GetAffineTransformDomain(output_base_bounds, new_output_map.offset(),
original_stride));
auto input_bounds = DownsampleInterval(
bounds_interval, output_downsample_factor, DownsampleMethod::kMean);
if (!Contains(input_bounds, input_interval)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Propagated bounds interval ", input_bounds,
" does not contain ", input_interval));
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
new_output_map.SetSingleInputDimension(new_input_dim);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval,
GetAffineTransformInverseDomain(
input_interval, 0, original_stride * output_downsample_factor));
new_interval = Intersect(new_interval, bounds_interval);
new_output_map.stride() = original_stride;
new_input_domain[new_input_dim] = new_interval;
return absl::OkStatus();
}
absl::Status PropagateSingleInputDimensionMapDownsamplingAsNewDimension(
const internal_index_space::OutputIndexMap& output_map,
IndexInterval input_interval, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
if (input_interval.size() == 1 || output_map.stride() == 0) {
Index adjusted_offset;
if (internal::MulOverflow(input_interval.inclusive_min(),
output_map.stride(), &adjusted_offset) ||
internal::AddOverflow(adjusted_offset, output_map.offset(),
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " + ",
input_interval.inclusive_min(), " * ", output_map.stride()));
}
return PropagateUnitStrideSingleInputDimensionMapDownsampling(
adjusted_offset, 1,
IndexInterval::UncheckedSized(0, 1),
output_downsample_factor, new_output_map, output_base_bounds,
new_input_domain, new_input_dim, propagated);
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.offset() = 0;
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
new_index_array_data.index_range = output_base_bounds;
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
if (!input_interval.empty()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto output_range,
GetAffineTransformRange(input_interval, adjusted_offset,
adjusted_stride));
TENSORSTORE_ASSIGN_OR_RETURN(
output_range,
ShiftInterval(output_range, output_downsample_factor - 1, 0));
if (!Contains(output_base_bounds, output_range)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Output bounds interval ", output_base_bounds,
" does not contain output range interval ", output_range));
}
}
  // Zero out all byte strides: the new index array is constant along every
  // dimension except the original input dimension and the new dimension.
  std::fill_n(new_index_array_data.byte_strides, new_input_domain.rank(),
              Index(0));
  // Placeholder non-zero strides mark the two varying dimensions (and their
  // relative order); `AllocateArrayElementsLike` writes back the real strides.
  new_index_array_data.byte_strides[output_map.input_dimension()] = 1;
  new_index_array_data.byte_strides[new_input_dim] = 2;
  new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
      new_index_array_data.layout(new_input_domain),
      new_index_array_data.byte_strides, skip_repeated_elements);
Index* array_origin =
const_cast<Index*>(new_index_array_data.array_view(new_input_domain)
.byte_strided_origin_pointer()
.get());
for (Index j = 0; j < input_interval.size(); ++j) {
const Index base_index =
adjusted_offset +
adjusted_stride * (input_interval.inclusive_min() + j);
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (internal::AddOverflow(base_index, i, &x) ||
x > output_base_bounds.inclusive_max()) {
x = output_base_bounds.inclusive_max();
} else if (x < output_base_bounds.inclusive_min()) {
x = output_base_bounds.inclusive_min();
}
array_origin[input_interval.size() * i + j] = x;
}
}
return absl::OkStatus();
}
absl::Status PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
MutableBoxView<> new_input_domain, DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.SetConstant();
new_output_map.offset() = 0;
new_output_map.stride() = 0;
return absl::OkStatus();
}
absl::Status PropagateIndexArrayMapDownsampling(
const internal_index_space::OutputIndexMap& output_map,
BoxView<> downsampled_input_domain, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
new_output_map.offset() = 0;
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
const DimensionIndex input_rank = downsampled_input_domain.rank();
const auto& index_array_data = output_map.index_array_data();
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto padded_output_interval,
ShiftInterval(output_base_bounds, -(output_downsample_factor - 1), 0));
TENSORSTORE_ASSIGN_OR_RETURN(
auto effective_index_range,
GetAffineTransformDomain(padded_output_interval, adjusted_offset,
adjusted_stride));
effective_index_range =
Intersect(effective_index_range, index_array_data.index_range);
new_index_array_data.index_range = output_base_bounds;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_domain.rank() - input_rank, Index(0));
  // Mark the new dimension as varying with a placeholder byte stride;
  // `AllocateArrayElementsLike` below writes back the actual allocated stride.
  new_index_array_data.byte_strides[new_input_dim] =
      std::numeric_limits<Index>::max();
new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
new_index_array_data.layout(new_input_domain),
new_index_array_data.byte_strides, skip_repeated_elements);
absl::Status status;
IterateOverArrays(
[&](const Index* existing_index,
ByteStridedPointer<const Index> new_index) {
const Index existing_index_value = *existing_index;
if (!Contains(effective_index_range, existing_index_value)) {
status = CheckContains(effective_index_range, existing_index_value);
return false;
}
Index base_index =
existing_index_value * adjusted_stride + adjusted_offset;
const Index byte_stride =
new_index_array_data.byte_strides[new_input_dim];
Index cur_index =
std::max(base_index, output_base_bounds.inclusive_min());
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (!internal::AddOverflow(base_index, i, &x) &&
output_base_bounds.exclusive_max() > x) {
cur_index = std::max(cur_index, x);
}
assert(Contains(output_base_bounds, cur_index));
*const_cast<Index*>((new_index + i * byte_stride).get()) = cur_index;
}
return true;
},
skip_repeated_elements,
index_array_data.array_view(downsampled_input_domain),
new_index_array_data.array_view(downsampled_input_domain));
return status;
}
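// Illustrative example (mirrors the Rank1IndexArray unit test below): the
// index array map {4, 7, 3} downsampled by factor 4 with base bounds [0, 50)
// expands into the rank-2 index array
// {{16, 17, 18, 19}, {28, 29, 30, 31}, {12, 13, 14, 15}} over a new input
// dimension of extent 4, with index_range [0, 50).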
}
absl::Status PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors,
PropagatedIndexTransformDownsampling& propagated) {
using internal_index_space::TransformAccess;
using internal_index_space::TransformRep;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
const DimensionIndex input_rank = downsampled_transform.input_rank();
assert(output_base_bounds.rank() == output_rank);
assert(output_downsample_factors.size() == output_rank);
  // Count how many output index maps reference each input dimension; a
  // stride-±1 single-input-dimension map can be downsampled in place only if
  // its input dimension is not referenced by any other output map.
  DimensionIndex input_dimension_ref_counts[kMaxRank];
  internal::ComputeInputDimensionReferenceCounts(
      downsampled_transform, span(&input_dimension_ref_counts[0], input_rank));
const bool is_domain_empty = downsampled_transform.domain().box().is_empty();
DimensionIndex additional_input_dims = ComputeAdditionalInputDimensionsNeeded(
downsampled_transform, output_downsample_factors,
{input_dimension_ref_counts, input_rank}, is_domain_empty);
const DimensionIndex new_input_rank = input_rank + additional_input_dims;
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(new_input_rank));
auto new_transform = TransformRep::Allocate(new_input_rank, output_rank);
new_transform->output_rank = output_rank;
internal_index_space::CopyTransformRepDomain(
TransformAccess::rep(downsampled_transform), new_transform.get());
new_transform->input_rank = new_input_rank;
new_transform->implicit_lower_bounds = false;
new_transform->implicit_upper_bounds = false;
MutableBoxView<> input_domain = new_transform->input_domain(new_input_rank);
std::fill(input_domain.origin().begin() + input_rank,
input_domain.origin().begin() + new_input_rank, Index(0));
std::fill(input_domain.shape().begin() + input_rank,
input_domain.shape().begin() + new_input_rank, Index(1));
propagated.input_downsample_factors.clear();
propagated.input_downsample_factors.resize(new_input_rank, 1);
DimensionIndex next_input_dim = input_rank;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& output_map = TransformAccess::rep(downsampled_transform)
->output_index_maps()[output_dim];
auto& new_output_map = new_transform->output_index_maps()[output_dim];
const Index output_downsample_factor =
output_downsample_factors[output_dim];
if (output_downsample_factor == 1) {
TENSORSTORE_RETURN_IF_ERROR(ExtendOutputIndexMap(
output_map, new_output_map, input_rank, new_input_rank));
continue;
}
    absl::Status status;
    // Dispatch on the output index method: constant and stride-±1
    // single-input-dimension maps can be handled by rescaling offsets and
    // bounds; the remaining cases require a new synthetic input dimension.
    switch (output_map.method()) {
case OutputIndexMethod::constant: {
if (is_domain_empty) {
new_output_map.SetConstant();
new_output_map.offset() = 0;
new_output_map.stride() = 0;
break;
}
status = PropagateUnitStrideSingleInputDimensionMapDownsampling(
output_map.offset(), 1,
IndexInterval::UncheckedSized(0, 1),
output_downsample_factor, new_output_map,
output_base_bounds[output_dim], input_domain,
next_input_dim++, propagated);
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = output_map.input_dimension();
const IndexInterval input_interval =
downsampled_transform.input_domain().box()[input_dim];
if (std::abs(output_map.stride()) == 1 &&
input_dimension_ref_counts[input_dim] == 1) {
status = PropagateUnitStrideSingleInputDimensionMapDownsampling(
output_map.offset(),
output_map.stride(),
input_interval, output_downsample_factor,
new_output_map,
output_base_bounds[output_dim],
input_domain,
input_dim, propagated);
break;
}
if (!IsFinite(input_interval)) {
status = absl::InvalidArgumentError(tensorstore::StrCat(
"Input domain ", input_interval, " is not finite"));
break;
}
if (input_interval.empty()) {
new_output_map.SetSingleInputDimension(input_dim);
new_output_map.offset() = 0;
new_output_map.stride() = 1;
break;
}
status =
is_domain_empty
? PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
output_downsample_factor, new_output_map, input_domain,
next_input_dim++, propagated)
: PropagateSingleInputDimensionMapDownsamplingAsNewDimension(
output_map, input_interval, output_downsample_factor,
new_output_map, output_base_bounds[output_dim],
input_domain, next_input_dim++, propagated);
break;
}
case OutputIndexMethod::array: {
status =
is_domain_empty
? PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
output_downsample_factor, new_output_map, input_domain,
next_input_dim++, propagated)
: PropagateIndexArrayMapDownsampling(
output_map, downsampled_transform.domain().box(),
output_downsample_factor, new_output_map,
output_base_bounds[output_dim], input_domain,
next_input_dim++, propagated);
break;
}
}
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(
status,
tensorstore::StrCat("Propagating downsampling factor ",
output_downsample_factor,
" through output dimension ", output_dim));
}
}
internal_index_space::DebugCheckInvariants(new_transform.get());
propagated.transform =
internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(new_transform));
return absl::OkStatus();
}
absl::Status PropagateAndComposeIndexTransformDownsampling(
IndexTransformView<> downsampled_transform,
IndexTransformView<> base_transform,
span<const Index> base_downsample_factors,
PropagatedIndexTransformDownsampling& propagated) {
TENSORSTORE_RETURN_IF_ERROR(PropagateIndexTransformDownsampling(
downsampled_transform, base_transform.domain().box(),
base_downsample_factors, propagated));
TENSORSTORE_ASSIGN_OR_RETURN(
propagated.transform,
ComposeTransforms(base_transform, propagated.transform));
return absl::OkStatus();
}
Result<PropagatedIndexTransformDownsampling>
PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors) {
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(PropagateIndexTransformDownsampling(
downsampled_transform, output_base_bounds, output_downsample_factors,
propagated));
return propagated;
}
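// Example usage (a minimal sketch; `downsampled_transform`, `base_bounds`,
// and `factors` are placeholders for caller-supplied values):
//
//     TENSORSTORE_ASSIGN_OR_RETURN(
//         auto propagated,
//         PropagateIndexTransformDownsampling(downsampled_transform,
//                                             base_bounds, factors));
//     // `propagated.transform` maps into the base (full-resolution) domain;
//     // `propagated.input_downsample_factors` gives the per-input-dimension
//     // factors needed to re-downsample data read through that transform.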
IndexInterval DownsampleInterval(IndexInterval base_interval,
Index downsample_factor,
DownsampleMethod method) {
assert(downsample_factor > 0);
Index inclusive_min;
if (base_interval.inclusive_min() == -kInfIndex) {
inclusive_min = -kInfIndex;
} else {
switch (method) {
case DownsampleMethod::kStride:
inclusive_min =
CeilOfRatio(base_interval.inclusive_min(), downsample_factor);
break;
case DownsampleMethod::kMean:
case DownsampleMethod::kMin:
case DownsampleMethod::kMax:
case DownsampleMethod::kMedian:
case DownsampleMethod::kMode:
inclusive_min =
FloorOfRatio(base_interval.inclusive_min(), downsample_factor);
break;
default:
ABSL_UNREACHABLE();
}
}
Index inclusive_max;
if (base_interval.inclusive_max() == kInfIndex) {
inclusive_max = kInfIndex;
  } else if (base_interval.empty()) {
    // Preserve emptiness: an empty base interval downsamples to an empty
    // interval.
    inclusive_max = inclusive_min - 1;
} else {
inclusive_max =
FloorOfRatio(base_interval.inclusive_max(), downsample_factor);
}
return IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
}
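// Worked example (values follow from the arithmetic above): with factor 3,
// the interval [1, 10] maps to [1, 3] under kStride (ceil(1/3) = 1) but to
// [0, 3] under kMean (floor(1/3) = 0), since accumulation-based methods let
// a partial leading block contribute an output element.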
void DownsampleBounds(BoxView<> base_bounds,
MutableBoxView<> downsampled_bounds,
span<const Index> downsample_factors,
DownsampleMethod method) {
const DimensionIndex rank = base_bounds.rank();
assert(rank == downsampled_bounds.rank());
assert(rank == downsample_factors.size());
for (DimensionIndex i = 0; i < rank; ++i) {
downsampled_bounds[i] =
DownsampleInterval(base_bounds[i], downsample_factors[i], method);
}
}
namespace {
class DownsampleDomainBuilder {
public:
explicit DownsampleDomainBuilder(IndexDomainView<> base_domain,
bool domain_only) {
const DimensionIndex input_rank = base_domain.rank();
const DimensionIndex output_rank = domain_only ? 0 : input_rank;
rep = internal_index_space::TransformRep::Allocate(input_rank, output_rank);
rep->input_rank = input_rank;
rep->output_rank = output_rank;
rep->implicit_lower_bounds = base_domain.implicit_lower_bounds();
rep->implicit_upper_bounds = base_domain.implicit_upper_bounds();
const auto& labels = base_domain.labels();
std::copy(labels.begin(), labels.end(), rep->input_labels().begin());
if (!domain_only) {
internal_index_space::SetToIdentityTransform(rep->output_index_maps());
}
}
MutableBoxView<> InputBounds() { return rep->input_domain(rep->input_rank); }
IndexTransform<> MakeTransform() {
internal_index_space::DebugCheckInvariants(rep.get());
return internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(rep));
}
private:
internal_index_space::TransformRep::Ptr<> rep;
};
}
IndexDomain<> DownsampleDomain(IndexDomainView<> base_domain,
span<const Index> downsample_factors,
DownsampleMethod method) {
DownsampleDomainBuilder builder(base_domain, true);
DownsampleBounds(base_domain.box(), builder.InputBounds(), downsample_factors,
method);
return builder.MakeTransform().domain();
}
IndexTransform<> GetDownsampledDomainIdentityTransform(
IndexDomainView<> base_domain, span<const Index> downsample_factors,
DownsampleMethod method) {
DownsampleDomainBuilder builder(base_domain, false);
DownsampleBounds(base_domain.box(), builder.InputBounds(), downsample_factors,
method);
return builder.MakeTransform();
}
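// Example (illustrative): downsampling the domain [0, 10) by factors {3}
// with kMean yields [0, 4), since DownsampleInterval([0, 9], 3, kMean) is
// [0, 3]; GetDownsampledDomainIdentityTransform additionally returns the
// identity transform over that downsampled domain.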
bool CanDownsampleIndexTransform(IndexTransformView<> base_transform,
BoxView<> base_bounds,
span<const Index> downsample_factors) {
const Index output_rank = base_transform.output_rank();
assert(base_bounds.rank() == output_rank);
assert(downsample_factors.size() == output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const Index downsample_factor = downsample_factors[output_dim];
const auto base_interval = base_bounds[output_dim];
const auto map = base_transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
if (downsample_factor != 1 &&
((base_interval.inclusive_min() != map.offset() &&
((map.offset() % downsample_factor) != 0)) ||
(base_interval.inclusive_max() != map.offset() &&
((map.offset() + 1) % downsample_factor) != 0))) {
return false;
}
break;
case OutputIndexMethod::single_input_dimension: {
if (downsample_factor == 1) break;
if (map.stride() != 1 && map.stride() != -1) {
return false;
}
const auto input_interval =
base_transform.input_domain().box()[map.input_dimension()];
TENSORSTORE_ASSIGN_OR_RETURN(
auto shifted_interval,
GetAffineTransformRange(input_interval, map.offset(), map.stride()),
false);
if ((base_interval.inclusive_min() !=
shifted_interval.inclusive_min() &&
(shifted_interval.inclusive_min() % downsample_factor) != 0) ||
(base_interval.exclusive_max() !=
shifted_interval.exclusive_max() &&
(shifted_interval.exclusive_max() % downsample_factor) != 0)) {
return false;
}
break;
}
case OutputIndexMethod::array:
return false;
}
}
return true;
}
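// Example (derived from the SingleInputDimension unit test below): with base
// bounds [9, 19) and factor 3, the sub-interval [9, 12) can be downsampled
// directly because its bounds align with block boundaries, whereas [9, 11)
// cannot, since it ends in the interior of a downsampling block.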
}
} | #include "tensorstore/driver/downsample/downsample_util.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/data_type_random_generator.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::CanDownsampleIndexTransform;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleBounds;
using ::tensorstore::internal_downsample::DownsampleInterval;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::tensorstore::internal_downsample::PropagatedIndexTransformDownsampling;
using ::tensorstore::internal_downsample::PropagateIndexTransformDownsampling;
using ::testing::Optional;
TEST(PropagateIndexTransformDownsamplingTest, Rank0) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(0), {}, {}),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(0), {}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1SingleInputDimension) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1}, {3})),
BoxView<1>({7}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2}, {5})), {2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, InvalidRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_transform,
tensorstore::IdentityTransform(32) | Dims(0).Stride(2));
EXPECT_THAT(PropagateIndexTransformDownsampling(
downsampled_transform, Box(32), std::vector<Index>(32, 2)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]"));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1Constant) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 2).Finalize().value(),
BoxView({7}, {2}), span<const Index>({3})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(1, 1)
.input_origin({1})
.input_exclusive_max({3})
.output_single_input_dimension(0, 6, 1, 0)
.Finalize()
.value(),
{3}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionPartialStartBlock) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({0}, {4})),
BoxView({1}, {6}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({1}, {6})), {2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2WithIgnoredDimension) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1, 2}, {3, 5})),
BoxView({7, 10}), span<const Index>({2, 1})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2, 2}, {5, 5})), {2, 1}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1IndexArray) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({4, 7, 3}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{16, 17, 18, 19},
{28, 29, 30, 31},
{12, 13, 14, 15}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank3IndexArrayConstantNoDownsampling) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 3)
.input_shape({3, 4})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
BoxView({30, 50, 55}), span<const Index>({1, 2, 1})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 3)
.input_shape({3, 8})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
{1, 2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2IndexArray) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(3, 1)
.input_shape({2, 3, 4})
.output_index_array(
0, 0, 1,
MakeArray<Index>(
{{{4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}},
{{16, 17, 18, 19}, {20, 21, 22, 23}, {24, 25, 26, 27}}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionStrided) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_single_input_dimension(0, 1, 5, 0)
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{4, 5, 6, 7},
{24, 25, 26, 27},
{44, 45, 46, 47}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1)
.output_constant(0, tensorstore::kMaxFiniteIndex)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOutOfBounds) {
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {14}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({1}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({2}, {13}), span<const Index>({3})));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 5).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({3}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionStridedNonFiniteDomain) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({0})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Input domain .* is not finite"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionSize1StridedOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, 0, std::numeric_limits<Index>::max(), 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({0xfffffffffffff})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOutOfRange) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {199}), span<const Index>({2})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Output bounds interval .* does not contain "
"output range interval .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 100, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 100, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorIndexArrayOutOfRange) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {9}), span<const Index>({2})),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagating downsampling factor 2 through output dimension 0: "
"Index 5 is outside valid range \\[0, 5\\)"));
}
TEST(CanDownsampleIndexTransformTest, Rank0) {
EXPECT_TRUE(
CanDownsampleIndexTransform(tensorstore::IdentityTransform(0), {}, {}));
}
TEST(CanDownsampleIndexTransformTest, Constant) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(1) | Dims(0).IndexSlice(42));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {1}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {3}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({1})));
}
TEST(CanDownsampleIndexTransformTest, SingleInputDimension) {
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(18, 1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 2)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, -1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(10, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
}
TEST(CanDownsampleIndexTransformTest, IndexArray) {
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) |
Dims(0).IndexArraySlice(MakeArray<Index>({2, 5, 3})))
.value(),
BoxView<1>({0}, {100}), span<const Index>({2})));
}
void TestPropagateIndexTransformDownsamplingInvariance(DimensionIndex rank) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_DOWNSAMPLE_PROPAGATE_INVARIANCE_SEED")};
tensorstore::internal::MakeRandomBoxParameters box_p;
box_p.min_rank = box_p.max_rank = rank;
auto base_bounds = tensorstore::internal::MakeRandomBox(gen, box_p);
SCOPED_TRACE(tensorstore::StrCat("base_bounds=", base_bounds));
auto base_data = tensorstore::internal::MakeRandomArray(
gen, base_bounds, tensorstore::dtype_v<uint8_t>);
SCOPED_TRACE(tensorstore::StrCat("base_data=", base_data));
std::vector<Index> downsample_factors(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
downsample_factors[i] =
absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1, 2);
}
SCOPED_TRACE(tensorstore::StrCat("downsample_factors=",
tensorstore::span(downsample_factors)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_data,
DownsampleArray(base_data, downsample_factors, DownsampleMethod::kMean));
Box<> downsampled_bounds(rank);
DownsampleBounds(base_bounds, downsampled_bounds, downsample_factors,
DownsampleMethod::kMean);
SCOPED_TRACE(tensorstore::StrCat("downsampled_bounds=", downsampled_bounds));
auto downsampled_transform = tensorstore::internal::MakeRandomIndexTransform(
gen, downsampled_bounds, rank * 2);
SCOPED_TRACE(
tensorstore::StrCat("downsampled_transform=", downsampled_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto propagated,
PropagateIndexTransformDownsampling(downsampled_transform, base_bounds,
downsample_factors));
SCOPED_TRACE(tensorstore::StrCat("propagated=", propagated));
SCOPED_TRACE(tensorstore::StrCat("downsampled_data=", downsampled_data));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsample_then_transform,
downsampled_data | downsampled_transform | tensorstore::Materialize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transformed_base,
base_data | propagated.transform);
tensorstore::SharedOffsetArray<const void> transform_then_downsample;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
DownsampleTransformedArray(transformed_base,
propagated.input_downsample_factors,
DownsampleMethod::kMean));
if (downsampled_transform.input_rank() < propagated.transform.input_rank()) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
transform_then_downsample |
tensorstore::DynamicDims(
{tensorstore::DimRangeSpec{downsampled_transform.input_rank()}})
.IndexSlice(0) |
tensorstore::Materialize());
}
EXPECT_EQ(transform_then_downsample, downsample_then_transform);
}
constexpr size_t kNumRandomTests = 50;
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank0) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(0);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank1) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(1);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank2) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(2);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank3) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(3);
}
}
TEST(DownsampleIntervalTest, UnboundedLower) {
EXPECT_EQ(IndexInterval::Closed(-kInfIndex, 10),
DownsampleInterval(IndexInterval::UncheckedClosed(-kInfIndex, 30),
3, DownsampleMethod::kMean));
}
TEST(DownsampleIntervalTest, UnboundedUpper) {
EXPECT_EQ(IndexInterval::Closed(-10, kInfIndex),
DownsampleInterval(IndexInterval::UncheckedClosed(-30, kInfIndex),
3, DownsampleMethod::kMean));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9f7a8b01-7a36-410b-8f64-3545915fd10c | cpp | google/tensorstore | downsample_array | tensorstore/driver/downsample/downsample_array.cc | tensorstore/driver/downsample/downsample_array_test.cc | #include "tensorstore/driver/downsample/downsample_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_nditerable.h"
#include "tensorstore/driver/downsample/downsample_util.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_downsample {
namespace {
absl::Status ValidateDownsampleDomain(BoxView<> base_domain,
BoxView<> downsampled_domain,
span<const Index> downsample_factors,
DownsampleMethod method) {
const DimensionIndex rank = base_domain.rank();
if (rank != downsampled_domain.rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " to domain ",
downsampled_domain, " with different rank"));
}
if (rank != downsample_factors.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " with downsample factors ",
downsample_factors, " of different rank"));
}
for (DimensionIndex i = 0; i < rank; ++i) {
const auto expected_interval =
DownsampleInterval(base_domain[i], downsample_factors[i], method);
if (expected_interval != downsampled_domain[i]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample array with domain ", base_domain, " by factors ",
downsample_factors, " with method ", method, " to array with domain ",
downsampled_domain, ": expected target dimension ", i,
" to have domain ", expected_interval));
}
}
return absl::OkStatus();
}
}
absl::Status DownsampleArray(OffsetArrayView<const void> source,
OffsetArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleDomain(
source.domain(), target.domain(), downsample_factors, method));
  // Stride-based downsampling selects single elements, so a plain strided
  // copy suffices; no accumulation across blocks is needed.
  if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
source | tensorstore::AllDims().Stride(downsample_factors), target);
}
internal::DefaultNDIterableArena arena;
auto base_iterable = GetArrayNDIterable(UnownedToShared(source), arena);
auto target_iterable = GetArrayNDIterable(UnownedToShared(target), arena);
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain(), downsample_factors, method,
downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleArray(
OffsetArrayView<const void> source, span<const Index> downsample_factors,
DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target.element_pointer() = AllocateArrayElementsLike<void>(
StridedLayoutView<dynamic_rank, offset_origin>(
target.rank(), target.origin().data(), target.shape().data(),
source.byte_strides().data()),
target.byte_strides().data(), skip_repeated_elements, default_init,
source.dtype());
TENSORSTORE_RETURN_IF_ERROR(
DownsampleArray(source, target, downsample_factors, method));
return target;
}
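// Example (mirrors the unit tests below): downsampling the rank-1 array
// {1, 2, 5, 7} by factors {2} with kMean produces {1.5, 6}; with a partial
// trailing block, {1, 2, 5, 7, 9} produces {1.5, 6, 9}.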
absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source,
TransformedArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(
ValidateDownsampleDomain(source.domain().box(), target.domain().box(),
downsample_factors, method));
if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
std::move(source) | tensorstore::AllDims().Stride(downsample_factors),
target);
}
internal::DefaultNDIterableArena arena;
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_iterable,
GetTransformedArrayNDIterable(UnownedToShared(source), arena));
TENSORSTORE_ASSIGN_OR_RETURN(
auto target_iterable,
GetTransformedArrayNDIterable(UnownedToShared(target), arena));
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain().box(), downsample_factors,
method, downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleTransformedArray(
TransformedArrayView<const void> source,
span<const Index> downsample_factors, DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain().box(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target =
AllocateArray(target.domain(), c_order, default_init, source.dtype());
TENSORSTORE_RETURN_IF_ERROR(DownsampleTransformedArray(
source, TransformedArray(target), downsample_factors, method));
return target;
}
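// Example (mirrors the MeanRank1ReversedExactMultiple unit test below):
// downsampling the reversed view of {1, 2, 3, 4} by factors {2} with kMean
// produces {3.5, 1.5}.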
}
} | #include "tensorstore/driver/downsample/downsample_array.h"
#include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::testing::Optional;
TEST(DownsampleArrayTest, MeanRank0) {
EXPECT_THAT(DownsampleArray(tensorstore::MakeScalarArray<float>(42.0),
span<const Index>(), DownsampleMethod::kMean),
Optional(tensorstore::MakeScalarArray<float>(42.0)));
}
TEST(DownsampleArrayTest, MeanRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 3, 5, 7, 12}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({2, 8})));
}
TEST(DownsampleArrayTest, MeanRoundingUint8) {
EXPECT_THAT(DownsampleArray(MakeArray<uint8_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint8_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-253, -254, -254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-254})));
}
TEST(DownsampleArrayTest, MeanRoundingToEvenInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 4, 4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({4})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -2, -2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -4, -4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-4})));
}
TEST(DownsampleArrayTest, MeanRoundingUint64) {
EXPECT_THAT(DownsampleArray(MakeArray<uint64_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint64_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 0}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
}
TEST(DownsampleArrayTest, MeanRank1Offset) {
EXPECT_THAT(DownsampleArray(MakeOffsetArray<float>({1}, {1, 2, 5, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 3.5, 9})));
}
TEST(DownsampleArrayTest, MeanRank1SingleDownsampledElement) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2}), span<const Index>({2}),
DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5})));
}
TEST(DownsampleArrayTest, MeanRank1NotExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6, 9})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 6, 7, 9}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3, 8})));
}
TEST(DownsampleArrayTest, MeanRank1NoDownsampling) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 2, 5, 7})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim1) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({1, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{1.5, 6}, {5.5, 20}})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim0) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3, 4, 10, 16}})));
}
TEST(DownsampleArrayTest, MeanRank2TwoDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3.5, 13.0}})));
}
TEST(DownsampleArrayTest, MeanRank2NotExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{4, 6, 7.5},
{11.5, 13.5, 15},
})));
}
TEST(DownsampleArrayTest, MeanRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMean),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {8.5, 10.5, 12.5}})));
}
TEST(DownsampleArrayTest, MedianRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMedian),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 9, 10}})));
}
TEST(DownsampleArrayTest, ModeRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 3, 5},
{6, 4, 5, 5, 10},
{11, 6, 6, 6, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kMode),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 6, 10}})));
}
TEST(DownsampleArrayTest, StrideRank2PartialEndBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({2, 6},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({1, 2}, {
{1, 4},
{11, 14},
})));
}
TEST(DownsampleArrayTest, StrideRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({2, 3}, {
{7, 10},
})));
}
TEST(DownsampleArrayTest, MeanRank3ThreeDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10, 11, 12},
},
{
{13, 14, 15, 16},
{17, 18, 19, 20},
{21, 22, 23, 24},
},
{
{25, 26, 27, 28},
{29, 30, 31, 32},
{33, 34, 35, 36},
}}),
span<const Index>({2, 2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{
{9.5, 11.5},
{15.5, 17.5},
},
{
{27.5, 29.5},
{33.5, 35.5},
}})));
}
TEST(DownsampleArrayTest, MeanRank1ReversedExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3.5, 1.5})));
}
TEST(DownsampleArrayTest, MeanRank1ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4, 5}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({4.5, 2.5, 1})));
}
TEST(DownsampleArrayTest, MeanRank2ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}) |
Dims(0, 1).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{12, 10, 8.5},
{4.5, 2.5, 1},
})));
}
TEST(DownsampleArrayTest, MinRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMin),
Optional(MakeArray<float>({2, 1})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMin),
Optional(MakeArray<int>({2, 1})));
}
TEST(DownsampleArrayTest, MaxRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMax),
Optional(MakeArray<float>({3, 5})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMax),
Optional(MakeArray<int>({8, 7})));
}
TEST(DownsampleArrayTest, MedianRank1ExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 98, 97, 5}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 97})));
}
TEST(DownsampleArrayTest, MedianRank1Partial) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 97, 98}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 98})));
}
TEST(DownsampleArrayTest, ModeRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 2})));
}
TEST(DownsampleArrayTest, ModeRank1Partial) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 3})));
}
TEST(DownsampleArrayTest, ModeBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, MeanBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, MedianBool) {
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, ModeJson) {
using ::tensorstore::dtypes::json_t;
EXPECT_THAT(DownsampleArray(MakeArray<json_t>({"a", "a", 3.0, 3, 3u}),
span<const Index>({5}), DownsampleMethod::kMode),
Optional(MakeArray<::nlohmann::json>({json_t(3)})));
}
TEST(DownsampleArrayTest, MultipleBlocks) {
auto source_array = tensorstore::AllocateArray<uint8_t>({128, 128});
auto expected_downsampled = tensorstore::AllocateArray<uint8_t>({64, 64});
for (int i = 0; i < 128; ++i) {
for (int j = 0; j < 128; ++j) {
source_array(i, j) = static_cast<uint8_t>(i);
}
}
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
expected_downsampled(i, j) = static_cast<uint8_t>(i * 2);
}
}
EXPECT_THAT(DownsampleArray(source_array, {{2, 2}}, DownsampleMethod::kMean),
Optional(tensorstore::MatchesArray(expected_downsampled)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
83794754-4a41-4ddb-9315-284eacde22be | cpp | google/tensorstore | json_change_map | tensorstore/driver/json/json_change_map.cc | tensorstore/driver/json/json_change_map_test.cc | #include "tensorstore/driver/json/json_change_map.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_driver {
Result<::nlohmann::json> JsonChangeMap::Apply(
const ::nlohmann::json& existing,
std::string_view sub_value_pointer) const {
Map::const_iterator changes_it = map_.lower_bound(sub_value_pointer),
changes_end = map_.end();
  // Case 1: an exact change is recorded for `sub_value_pointer`; validate the
  // pointer against `existing`, then return the changed value directly.
  if (changes_it != changes_end && changes_it->first == sub_value_pointer) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, changes_it->second};
}
  // Case 2: a change recorded on an ancestor pointer contains
  // `sub_value_pointer`; dereference the remainder of the pointer within that
  // changed value.
  if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto* modified_value,
json_pointer::Dereference(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
json_pointer::kMustExist));
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, prev_it->first,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, *modified_value};
}
}
  // Case 3: start from the existing value (or a `discarded` placeholder if it
  // does not exist) and apply any changes recorded on descendant pointers.
  ::nlohmann::json new_value;
  {
TENSORSTORE_ASSIGN_OR_RETURN(
const ::nlohmann::json* restricted_existing,
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate));
if (restricted_existing) {
new_value = *restricted_existing;
} else {
new_value = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
}
for (; changes_it != changes_end &&
json_pointer::Compare(changes_it->first, sub_value_pointer) ==
json_pointer::kContainedIn;
++changes_it) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Replace(new_value,
std::string_view(changes_it->first)
.substr(sub_value_pointer.size()),
changes_it->second),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
}
return new_value;
}
bool JsonChangeMap::CanApplyUnconditionally(
std::string_view sub_value_pointer) const {
Map::const_iterator changes_it;
if (sub_value_pointer.empty()) {
changes_it = map_.begin();
} else {
changes_it = map_.lower_bound(sub_value_pointer);
}
if (changes_it != map_.end()) {
if (changes_it->first == sub_value_pointer) {
return true;
}
}
if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
return json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains;
}
return false;
}
absl::Status JsonChangeMap::AddChange(std::string_view sub_value_pointer,
::nlohmann::json sub_value) {
auto it = map_.lower_bound(sub_value_pointer);
if (it != map_.end()) {
auto compare_result = json_pointer::Compare(sub_value_pointer, it->first);
assert(compare_result <= json_pointer::kEqual);
if (compare_result == json_pointer::kEqual) {
it->second = std::move(sub_value);
return absl::OkStatus();
}
while (compare_result == json_pointer::kContains) {
it = map_.erase(it);
if (it == map_.end()) break;
compare_result = json_pointer::Compare(sub_value_pointer, it->first);
}
}
if (it != map_.begin()) {
auto prev_it = std::prev(it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
return json_pointer::Replace(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
std::move(sub_value));
}
}
map_.try_emplace(it, std::string(sub_value_pointer), std::move(sub_value));
return absl::OkStatus();
}
}
} | #include "tensorstore/driver/json/json_change_map.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json_driver::JsonChangeMap;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pair;
TEST(JsonChangeMapTest, AddChangeValid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(false)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", true));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(true)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}, {"a", false}}))));
}
TEST(JsonChangeMapTest, AddChangeValidIndependent) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/e", "xx"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/a", "yy"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/a", MatchesJson("yy")),
Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}})),
Pair("/a/e", MatchesJson("xx"))));
}
TEST(JsonChangeMapTest, AddChangeInvalid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.AddChange("/a/b", 43),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyEmptyChangeMap) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}),
Optional(MatchesJson(::nlohmann::json{{"x", "y"}, {"z", "w"}})));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}, "/x"),
Optional(MatchesJson(::nlohmann::json("y"))));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", {{"b", {{"c", 42}}}}}}));
EXPECT_THAT(changes.Apply("old", "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyInvalidContainingChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply(false, "/a/b/c"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyChangeMapPriorNonContaining) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 10));
EXPECT_THAT(changes.Apply({{"b", 42}}, "/b"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply({{"e", "f"}}, "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"e", "f"}}),
Optional(MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", 42}}}}}, {"e", 42}, {"x", "y"}})));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply(42),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/4", 42));
EXPECT_THAT(changes.Apply({1, 2, 3}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointer) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointerNoChanges) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestNewMember) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(::nlohmann::json::object_t{}, "/a"),
Optional(MatchesJson(::nlohmann::json{{"b", 42}})));
}
TEST(JsonChangeMapTest, ApplyIncompatibleChangeExactRequest) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, AddIncompatibleChanges) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", 42));
EXPECT_THAT(changes.AddChange("/a", 50),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied "
"to number value: 42"));
}
TEST(JsonChangeMapTest, CanApplyUnconditionally) {
JsonChangeMap changes;
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/b/c"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"c", 42}}));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/c"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/d"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/x"));
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", false}}));
EXPECT_TRUE(changes.CanApplyUnconditionally(""));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/json/json_change_map.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/json/json_change_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ddcc6814-334a-4758-8a63-84abf1615473 | cpp | google/tensorstore | zlib_compressor | tensorstore/internal/compression/zlib_compressor.cc | tensorstore/driver/zarr/zlib_compressor_test.cc | #include "tensorstore/internal/compression/zlib_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/writer.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
namespace tensorstore {
namespace internal {
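// Wraps `base_writer` in a riegeli::ZlibWriter.  A `level` of -1 keeps
// riegeli's default compression level, and `use_gzip_header` selects gzip
// rather than zlib framing of the deflate stream.  `element_bytes` is
// unused by this codec.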
std::unique_ptr<riegeli::Writer> ZlibCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::ZlibWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
if (level != -1) options.set_compression_level(level);
options.set_header(use_gzip_header ? Writer::Header::kGzip
: Writer::Header::kZlib);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> ZlibCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::ZlibReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_header(use_gzip_header ? Reader::Header::kGzip
: Reader::Header::kZlib);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/driver/zarr/compressor.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::Compressor;
class ZlibCompressorTest : public ::testing::TestWithParam<const char*> {};
INSTANTIATE_TEST_SUITE_P(ZlibCompressorTestCases, ZlibCompressorTest,
::testing::Values("zlib", "gzip"));
TEST_P(ZlibCompressorTest, SmallRoundtrip) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 6}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, DefaultLevel) {
auto compressor1 = Compressor::FromJson({{"id", GetParam()}}).value();
auto compressor2 =
Compressor::FromJson({{"id", GetParam()}, {"level", 1}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor1->Encode(input, &encode_result1, 1));
TENSORSTORE_ASSERT_OK(compressor2->Encode(input, &encode_result2, 1));
EXPECT_EQ(encode_result1, encode_result2);
}
TEST_P(ZlibCompressorTest, NonDefaultLevel) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 9}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, InvalidParameter) {
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", "6"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", -1}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"foo", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Object includes extra members: \"foo\""));
}
TEST_P(ZlibCompressorTest, ToJson) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 5}}).value();
EXPECT_EQ(nlohmann::json({{"id", GetParam()}, {"level", 5}}),
compressor.ToJson());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zlib_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/zlib_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
afae6ffd-8b55-4046-b9af-01091e4ddae9 | cpp | google/tensorstore | compressor | tensorstore/driver/n5/compressor.cc | tensorstore/driver/zarr/compressor_test.cc | #include "tensorstore/driver/n5/compressor.h"
#include <utility>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/n5/compressor_registry.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_registry.h"
namespace tensorstore {
namespace internal_n5 {
using CompressorRegistry = internal::JsonSpecifiedCompressor::Registry;
CompressorRegistry& GetCompressorRegistry() {
static absl::NoDestructor<CompressorRegistry> registry;
return *registry;
}
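// JSON binder for the N5 "compression" member.  The "type" field selects a
// registered compressor; jb::MapValue maps the default-constructed (null)
// Compressor to the type string "raw", so {"type": "raw"} round-trips as
// "no compression".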
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(Compressor, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
namespace jb = tensorstore::internal_json_binding;
auto& registry = GetCompressorRegistry();
return jb::Object(
jb::Member("type",
jb::MapValue(registry.KeyBinder(),
std::make_pair(Compressor{}, "raw"))),
registry.RegisteredObjectBinder())(is_loading, options, obj, j);
})
}
} | #include "tensorstore/driver/zarr/compressor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::Compressor;
TEST(ParseCompressorTest, Null) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto compressor,
Compressor::FromJson(nullptr));
EXPECT_EQ(nullptr, ::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibSuccess) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto compressor, Compressor::FromJson({{"id", "zlib"}, {"level", 5}}));
EXPECT_EQ((::nlohmann::json{{"id", "zlib"}, {"level", 5}}),
::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibFailure) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "zlib"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
}
TEST(ParseCompressorTest, UnsupportedId) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "invalid"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"\"invalid\" is not registered"));
}
TEST(ParseCompressorTest, InvalidId) {
EXPECT_THAT(Compressor::FromJson(::nlohmann::json{{"id", 5}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"Expected string, but received: 5"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1352048f-deb8-481b-a19a-318989c453e4 | cpp | google/tensorstore | bzip2_compressor | tensorstore/internal/compression/bzip2_compressor.cc | tensorstore/driver/n5/bzip2_compressor_test.cc | #include "tensorstore/internal/compression/bzip2_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/bzip2/bzip2_reader.h"
#include "riegeli/bzip2/bzip2_writer.h"
namespace tensorstore {
namespace internal {
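// Wraps `base_writer` in a riegeli::Bzip2Writer at the configured `level`
// (bzip2 block sizes of 100k-900k correspond to levels 1-9).
// `element_bytes` is unused by this codec.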
std::unique_ptr<riegeli::Writer> Bzip2Compressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::Bzip2Writer<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> Bzip2Compressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::Bzip2Reader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(Bzip2CompressionTest, Parse) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Compressor>({
{{{"type", "bzip2"}}, {{"type", "bzip2"}, {"blockSize", 9}}},
{{{"type", "bzip2"}, {"blockSize", 3}},
{{"type", "bzip2"}, {"blockSize", 3}}},
});
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"extra", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(Bzip2CompressionTest, Golden) {
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x42, 0x5a, 0x68, 0x39,
0x31, 0x41, 0x59, 0x26,
0x53, 0x59, 0x02, 0x3e,
0x0d, 0xd2, 0x00, 0x00,
0x00, 0x40, 0x00, 0x7f,
0x00, 0x20, 0x00, 0x31,
0x0c, 0x01, 0x0d, 0x31,
0xa8, 0x73, 0x94, 0x33,
0x7c, 0x5d, 0xc9, 0x14,
0xe1, 0x42, 0x40, 0x08,
0xf8, 0x37, 0x48,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata,
N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression", {{"type", "bzip2"}}}}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/bzip2_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/bzip2_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1ceb4a07-5c78-4d1a-9f00-874e52495ac2 | cpp | google/tensorstore | blosc_compressor | tensorstore/internal/compression/blosc_compressor.cc | tensorstore/driver/n5/blosc_compressor_test.cc | #include "tensorstore/internal/compression/blosc_compressor.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/chain_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/read_all.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/write.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/blosc.h"
namespace tensorstore {
namespace internal {
namespace {
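// Writer adapter that buffers the entire chunk in an absl::Cord and defers
// Blosc encoding to Done(): Blosc operates on a complete buffer rather than
// a stream, so the encoded bytes can only be produced once all input has
// been written to the underlying cord.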
class BloscDeferredWriter : public riegeli::CordWriter<absl::Cord> {
public:
explicit BloscDeferredWriter(blosc::Options options,
std::unique_ptr<riegeli::Writer> base_writer)
: CordWriter(riegeli::CordWriterBase::Options().set_max_block_size(
std::numeric_limits<size_t>::max())),
options_(std::move(options)),
base_writer_(std::move(base_writer)) {}
void Done() override {
CordWriter::Done();
auto output = blosc::Encode(dest().Flatten(), options_);
if (!output.ok()) {
Fail(std::move(output).status());
return;
}
auto status = riegeli::Write(*std::move(output), std::move(base_writer_));
if (!status.ok()) {
Fail(std::move(status));
return;
}
}
private:
blosc::Options options_;
std::unique_ptr<riegeli::Writer> base_writer_;
};
}
std::unique_ptr<riegeli::Writer> BloscCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
return std::make_unique<BloscDeferredWriter>(
blosc::Options{codec.c_str(), level, shuffle, blocksize, element_bytes},
std::move(base_writer));
}
std::unique_ptr<riegeli::Reader> BloscCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
auto output = riegeli::ReadAll(
std::move(base_reader),
[](absl::string_view input) -> absl::StatusOr<std::string> {
auto output = blosc::Decode(input);
if (!output.ok()) return std::move(output).status();
return *std::move(output);
});
auto reader = std::make_unique<riegeli::ChainReader<riegeli::Chain>>(
output.ok() ? riegeli::Chain(*std::move(output)) : riegeli::Chain());
if (!output.ok()) {
reader->Fail(std::move(output).status());
}
return reader;
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(BloscCompressionTest, Parse) {
for (auto codec : {"lz4", "blosclz", "lz4hc", "snappy", "zlib", "zstd"}) {
for (int level = 0; level <= 9; ++level) {
for (int shuffle = 0; shuffle <= 2; ++shuffle) {
for (int blocksize : {0, 256}) {
::nlohmann::json j{{"type", "blosc"},
{"cname", codec},
{"shuffle", shuffle},
{"clevel", level},
{"blocksize", blocksize}};
tensorstore::TestJsonBinderRoundTripJsonOnly<Compressor>({j});
}
}
}
}
EXPECT_THAT(
Compressor::FromJson({{"type", "blosc"}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", 3}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "invalid"},
{"shuffle", 0},
{"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", -1}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", -1},
{"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 3}, {"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 3},
{"extra", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressionTest, RoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata, N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{{"type", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"shuffle", 0}}}}));
auto array = MakeArray<uint16_t>({{{1, 2, 3}, {4, 5, 6}}});
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
TEST(BloscCompressionTest, Golden) {
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x02, 0x01, 0x96, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00,
0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02,
0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x06,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
N5Metadata::FromJson({
{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{
{"type", "blosc"},
{"clevel", 3},
{"blocksize", 0},
{"cname", "zstd"},
{"shuffle", 2},
}},
}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/blosc_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/blosc_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b3ef2df2-c7c5-4ea3-a7ab-56928ca266f3 | cpp | google/tensorstore | dtype | tensorstore/driver/zarr/dtype.cc | tensorstore/driver/zarr/dtype_test.cc | #include "tensorstore/driver/zarr/dtype.h"
#include <stddef.h>
#include <algorithm>
#include <limits>
#include "absl/base/optimization.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr {
constexpr char kDtypeBfloat16[] = "bfloat16";
constexpr char kDtypeFloat8e4m3fn[] = "float8_e4m3fn";
constexpr char kDtypeFloat8e4m3fnuz[] = "float8_e4m3fnuz";
constexpr char kDtypeFloat8e4m3b11fnuz[] = "float8_e4m3b11fnuz";
constexpr char kDtypeFloat8e5m2[] = "float8_e5m2";
constexpr char kDtypeFloat8e5m2fnuz[] = "float8_e5m2fnuz";
constexpr char kDtypeInt4[] = "int4";
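// Parses a zarr dtype string.  Besides the little-endian extension names
// defined above, this accepts NumPy typestr syntax: an endianness indicator
// ('<' little, '>' big, '|' none/native), a type character, and a size
// suffix.  Examples from the accompanying tests:
//   "<i4" -> little-endian int32      ">f8"  -> big-endian float64
//   "|b1" -> bool                     "|S10" -> 10-byte char array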
Result<ZarrDType::BaseDType> ParseBaseDType(std::string_view dtype) {
using D = ZarrDType::BaseDType;
if (dtype == kDtypeBfloat16) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::bfloat16_t>,
endian::little};
}
if (dtype == kDtypeFloat8e4m3fn) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3b11fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>, endian::little};
}
if (dtype == kDtypeInt4) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::int4_t>,
endian::little};
}
if (dtype.size() < 3) goto error;
{
const char endian_indicator = dtype[0];
const char type_indicator = dtype[1];
const std::string_view suffix = dtype.substr(2);
endian endian_value;
switch (endian_indicator) {
case '<':
endian_value = endian::little;
break;
case '>':
endian_value = endian::big;
break;
case '|':
endian_value = endian::native;
break;
default:
goto error;
}
switch (type_indicator) {
case 'b':
if (suffix != "1") goto error;
ABSL_FALLTHROUGH_INTENDED;
case 'S':
case 'V':
endian_value = endian::native;
break;
case 'i':
case 'u':
if (endian_indicator == '|') {
if (suffix != "1") goto error;
endian_value = endian::native;
break;
} else if (suffix == "1") {
endian_value = endian::native;
break;
}
[[fallthrough]];
case 'f':
case 'c':
case 'm':
case 'M':
if (endian_indicator == '|') {
goto error;
}
break;
}
switch (type_indicator) {
case 'b':
return D{std::string(dtype), dtype_v<bool>, endian::native};
case 'i':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<int8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<int16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<int32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<int64_t>, endian_value};
}
goto error;
case 'u':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<uint8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<uint16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<uint32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<uint64_t>, endian_value};
}
goto error;
case 'f':
if (suffix == "2") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float64_t>, endian_value};
}
goto error;
case 'c':
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex64_t>, endian_value};
}
if (suffix == "16") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex128_t>, endian_value};
}
goto error;
case 'S':
case 'V': {
Index num_elements = 0;
for (char c : suffix) {
if (internal::MulOverflow(num_elements, Index(10), &num_elements))
goto error;
if (c < '0' || c > '9') goto error;
if (internal::AddOverflow(num_elements, Index(c - '0'),
&num_elements))
goto error;
}
return D{std::string(dtype),
(type_indicator == 'S')
? DataType(dtype_v<::tensorstore::dtypes::char_t>)
: DataType(dtype_v<::tensorstore::dtypes::byte_t>),
endian::native,
{num_elements}};
}
}
}
error:
return absl::InvalidArgumentError(
tensorstore::StrCat("Unsupported zarr dtype: ", QuoteString(dtype)));
}
namespace {
Result<ZarrDType> ParseDTypeNoDerived(const nlohmann::json& value) {
ZarrDType out;
if (value.is_string()) {
out.has_fields = false;
out.fields.resize(1);
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(out.fields[0]),
ParseBaseDType(value.get<std::string>()));
return out;
}
out.has_fields = true;
auto parse_result = internal_json::JsonParseArray(
value,
[&](std::ptrdiff_t size) {
out.fields.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t field_i) {
auto& field = out.fields[field_i];
return internal_json::JsonParseArray(
x,
[&](std::ptrdiff_t size) {
if (size < 2 || size > 3) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected array of size 2 or 3, but received: ", x.dump()));
}
return absl::OkStatus();
},
[&](const ::nlohmann::json& v, std::ptrdiff_t i) {
switch (i) {
case 0:
if (internal_json::JsonRequireValueAs(v, &field.name).ok()) {
if (!field.name.empty()) return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected non-empty string, but received: ", v.dump()));
case 1: {
std::string dtype_string;
TENSORSTORE_RETURN_IF_ERROR(
internal_json::JsonRequireValueAs(v, &dtype_string));
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(field),
ParseBaseDType(dtype_string));
return absl::OkStatus();
}
case 2: {
return internal_json::JsonParseArray(
v,
[&](std::ptrdiff_t size) {
field.outer_shape.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t j) {
return internal_json::JsonRequireInteger(
x, &field.outer_shape[j], true, 1,
kInfIndex);
});
}
default:
ABSL_UNREACHABLE();
}
});
});
if (!parse_result.ok()) return parse_result;
return out;
}
}
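// Validates field names and fills in the derived members of `dtype`: each
// field's combined `field_shape` (outer_shape followed by flexible_shape),
// its inner element count and size in bytes, its byte offset within an
// outer element, and the total `bytes_per_outer_element`, with overflow
// checks throughout.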
absl::Status ValidateDType(ZarrDType& dtype) {
dtype.bytes_per_outer_element = 0;
for (size_t field_i = 0; field_i < dtype.fields.size(); ++field_i) {
auto& field = dtype.fields[field_i];
if (std::any_of(
dtype.fields.begin(), dtype.fields.begin() + field_i,
[&](const ZarrDType::Field& f) { return f.name == field.name; })) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Field name ", QuoteString(field.name), " occurs more than once"));
}
field.field_shape.resize(field.flexible_shape.size() +
field.outer_shape.size());
std::copy(field.flexible_shape.begin(), field.flexible_shape.end(),
std::copy(field.outer_shape.begin(), field.outer_shape.end(),
field.field_shape.begin()));
field.num_inner_elements = ProductOfExtents(span(field.field_shape));
if (field.num_inner_elements == std::numeric_limits<Index>::max()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Product of dimensions ", span(field.field_shape), " is too large"));
}
if (internal::MulOverflow(field.num_inner_elements,
static_cast<Index>(field.dtype->size),
&field.num_bytes)) {
return absl::InvalidArgumentError("Field size in bytes is too large");
}
field.byte_offset = dtype.bytes_per_outer_element;
if (internal::AddOverflow(dtype.bytes_per_outer_element, field.num_bytes,
&dtype.bytes_per_outer_element)) {
return absl::InvalidArgumentError(
"Total number of bytes per outer array element is too large");
}
}
return absl::OkStatus();
}
Result<ZarrDType> ParseDType(const nlohmann::json& value) {
TENSORSTORE_ASSIGN_OR_RETURN(ZarrDType dtype, ParseDTypeNoDerived(value));
TENSORSTORE_RETURN_IF_ERROR(ValidateDType(dtype));
return dtype;
}
void to_json(::nlohmann::json& out, const ZarrDType::Field& field) {
using array_t = ::nlohmann::json::array_t;
if (field.outer_shape.empty()) {
out = array_t{field.name, field.encoded_dtype};
} else {
out = array_t{field.name, field.encoded_dtype, field.outer_shape};
}
}
void to_json(::nlohmann::json& out, const ZarrDType& dtype) {
if (!dtype.has_fields) {
out = dtype.fields[0].encoded_dtype;
} else {
out = dtype.fields;
}
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrDType, [](auto is_loading,
const auto& options,
auto* obj, auto* j) {
if constexpr (is_loading) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj, ParseDType(*j));
} else {
to_json(*j, *obj);
}
return absl::OkStatus();
})
char EndianIndicator(tensorstore::endian e) {
return e == tensorstore::endian::little ? '<' : '>';
}
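// Chooses a canonical zarr base dtype for the in-memory `dtype`, using
// native-endian NumPy typestrs where available and the little-endian
// extension names (bfloat16, float8_*, int4) otherwise.  Fails for data
// types with no zarr representation, such as string.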
Result<ZarrDType::BaseDType> ChooseBaseDType(DataType dtype) {
ZarrDType::BaseDType base_dtype;
base_dtype.endian = endian::native;
base_dtype.dtype = dtype;
const auto set_typestr = [&](std::string_view typestr, int size) {
if (size > 1) {
base_dtype.encoded_dtype = tensorstore::StrCat(
EndianIndicator(base_dtype.endian), typestr, size);
} else {
base_dtype.encoded_dtype = tensorstore::StrCat("|", typestr, size);
}
};
switch (dtype.id()) {
case DataTypeId::bool_t:
set_typestr("b", 1);
break;
case DataTypeId::uint8_t:
set_typestr("u", 1);
break;
case DataTypeId::uint16_t:
set_typestr("u", 2);
break;
case DataTypeId::uint32_t:
set_typestr("u", 4);
break;
case DataTypeId::uint64_t:
set_typestr("u", 8);
break;
case DataTypeId::int4_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeInt4;
break;
case DataTypeId::int8_t:
set_typestr("i", 1);
break;
case DataTypeId::int16_t:
set_typestr("i", 2);
break;
case DataTypeId::int32_t:
set_typestr("i", 4);
break;
case DataTypeId::int64_t:
set_typestr("i", 8);
break;
case DataTypeId::float8_e4m3fn_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fn;
break;
case DataTypeId::float8_e4m3fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fnuz;
break;
case DataTypeId::float8_e4m3b11fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3b11fnuz;
break;
case DataTypeId::float8_e5m2_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2;
break;
case DataTypeId::float8_e5m2fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2fnuz;
break;
case DataTypeId::float16_t:
set_typestr("f", 2);
break;
case DataTypeId::bfloat16_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeBfloat16;
break;
case DataTypeId::float32_t:
set_typestr("f", 4);
break;
case DataTypeId::float64_t:
set_typestr("f", 8);
break;
case DataTypeId::complex64_t:
set_typestr("c", 8);
break;
case DataTypeId::complex128_t:
set_typestr("c", 16);
break;
default:
return absl::InvalidArgumentError(
tensorstore::StrCat("Data type not supported: ", dtype));
}
return base_dtype;
}
}
} | #include "tensorstore/driver/zarr/dtype.h"
#include <stdint.h>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr/metadata_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::ChooseBaseDType;
using ::tensorstore::internal_zarr::ParseBaseDType;
using ::tensorstore::internal_zarr::ParseDType;
using ::tensorstore::internal_zarr::ZarrDType;
void CheckBaseDType(std::string dtype, DataType r, endian e,
std::vector<Index> flexible_shape) {
EXPECT_THAT(ParseBaseDType(dtype), ::testing::Optional(ZarrDType::BaseDType{
dtype, r, e, flexible_shape}))
<< dtype;
}
TEST(ParseBaseDType, Success) {
CheckBaseDType("|b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("<b1", dtype_v<bool>, endian::native, {});
CheckBaseDType(">b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("|S150", dtype_v<char>, endian::native, {150});
CheckBaseDType(">S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("<S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("|S9223372036854775807", dtype_v<char>, endian::native,
{9223372036854775807});
CheckBaseDType("|V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("<V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType(">V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("|i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("<i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType(">i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("|u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType(">u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<i2", dtype_v<std::int16_t>, endian::little, {});
CheckBaseDType("<i4", dtype_v<std::int32_t>, endian::little, {});
CheckBaseDType("<i8", dtype_v<std::int64_t>, endian::little, {});
CheckBaseDType("<u2", dtype_v<std::uint16_t>, endian::little, {});
CheckBaseDType("<u4", dtype_v<std::uint32_t>, endian::little, {});
CheckBaseDType("<u8", dtype_v<std::uint64_t>, endian::little, {});
CheckBaseDType(">i2", dtype_v<std::int16_t>, endian::big, {});
CheckBaseDType(">i4", dtype_v<std::int32_t>, endian::big, {});
CheckBaseDType(">i8", dtype_v<std::int64_t>, endian::big, {});
CheckBaseDType(">u2", dtype_v<std::uint16_t>, endian::big, {});
CheckBaseDType(">u4", dtype_v<std::uint32_t>, endian::big, {});
CheckBaseDType(">u8", dtype_v<std::uint64_t>, endian::big, {});
CheckBaseDType("float8_e4m3fn", dtype_v<tensorstore::dtypes::float8_e4m3fn_t>,
endian::little, {});
CheckBaseDType("float8_e4m3fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e4m3b11fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e5m2", dtype_v<tensorstore::dtypes::float8_e5m2_t>,
endian::little, {});
CheckBaseDType("float8_e5m2fnuz",
dtype_v<tensorstore::dtypes::float8_e5m2fnuz_t>,
endian::little, {});
CheckBaseDType("<f2", dtype_v<tensorstore::dtypes::float16_t>, endian::little,
{});
CheckBaseDType("bfloat16", dtype_v<tensorstore::dtypes::bfloat16_t>,
endian::little, {});
CheckBaseDType("<f4", dtype_v<tensorstore::dtypes::float32_t>, endian::little,
{});
CheckBaseDType("<f8", dtype_v<tensorstore::dtypes::float64_t>, endian::little,
{});
CheckBaseDType(">f2", dtype_v<tensorstore::dtypes::float16_t>, endian::big,
{});
CheckBaseDType(">f4", dtype_v<tensorstore::dtypes::float32_t>, endian::big,
{});
CheckBaseDType(">f8", dtype_v<tensorstore::dtypes::float64_t>, endian::big,
{});
CheckBaseDType("<c8", dtype_v<tensorstore::dtypes::complex64_t>,
endian::little, {});
CheckBaseDType("<c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::little, {});
CheckBaseDType(">c8", dtype_v<tensorstore::dtypes::complex64_t>, endian::big,
{});
CheckBaseDType(">c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::big, {});
}
TEST(ParseBaseDType, Failure) {
EXPECT_THAT(ParseBaseDType(""),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Unsupported zarr dtype: \"\""));
EXPECT_THAT(ParseBaseDType("|f4"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|f8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c16"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|b2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|i2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<i9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<u9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<S"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S999999999999999999999999999"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S9223372036854775808"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|Sa"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S "),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<f5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<c5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<m8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<M8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<X5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
void CheckDType(const ::nlohmann::json& json, const ZarrDType& expected) {
SCOPED_TRACE(json.dump());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto dtype, ParseDType(json));
EXPECT_EQ(expected, dtype);
EXPECT_EQ(json, ::nlohmann::json(dtype));
}
TEST(ParseDType, SimpleStringBool) {
CheckDType("|b1", ZarrDType{
false,
{
{{
"|b1",
dtype_v<bool>,
endian::native,
{},
},
{},
"",
{},
1,
0,
1},
},
1,
});
}
TEST(ParseDType, SingleNamedFieldChar) {
CheckDType(::nlohmann::json::array_t{{"x", "|S10"}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{},
"x",
{10},
10,
0,
10},
},
10,
});
}
TEST(ParseDType, TwoNamedFieldsCharAndInt) {
CheckDType(
::nlohmann::json::array_t{{"x", "|S10", {2, 3}}, {"y", "<i2", {5}}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{2, 3},
"x",
{2, 3, 10},
10 * 2 * 3,
0,
10 * 2 * 3},
{{
"<i2",
dtype_v<std::int16_t>,
endian::little,
{},
},
{5},
"y",
{5},
5,
10 * 2 * 3,
2 * 5},
},
10 * 2 * 3 + 2 * 5,
});
}
TEST(ParseDType, FieldSpecTooShort) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x"}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: \\[\"x\"\\]"));
}
TEST(ParseDType, FieldSpecTooLong) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2", {2, 3}, 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: "
"\\[\"x\",\"<i2\",\\[2,3\\],5\\]"));
}
TEST(ParseDType, InvalidFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{3, "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: 3"));
}
TEST(ParseDType, EmptyFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"", "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: \"\""));
}
TEST(ParseDType, DuplicateFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2"}, {"x", "<u2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field name \"x\" occurs more than once"));
}
TEST(ParseDType, NonStringFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Expected string, but received: 3"));
}
TEST(ParseDType, InvalidFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<X2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Unsupported zarr dtype: \"<X2\""));
}
TEST(ParseDType, ProductOfDimensionsOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{
{"x", "|i1", {kInfIndex, kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Product of dimensions .* is too large"));
}
TEST(ParseDType, FieldSizeInBytesOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<f8", {kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field size in bytes is too large"));
}
TEST(ParseDType, BytesPerOuterElementOverflow) {
EXPECT_THAT(
ParseDType(::nlohmann::json::array_t{{"x", "<i2", {kInfIndex}},
{"y", "<i2", {kInfIndex}}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Total number of bytes per outer array element is too large"));
}
TEST(ChooseBaseDTypeTest, RoundTrip) {
constexpr tensorstore::DataType kSupportedDataTypes[] = {
dtype_v<bool>,
dtype_v<uint8_t>,
dtype_v<uint16_t>,
dtype_v<uint32_t>,
dtype_v<uint64_t>,
dtype_v<int8_t>,
dtype_v<int16_t>,
dtype_v<int32_t>,
dtype_v<int64_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>,
dtype_v<::tensorstore::dtypes::float16_t>,
dtype_v<::tensorstore::dtypes::bfloat16_t>,
dtype_v<::tensorstore::dtypes::float32_t>,
dtype_v<::tensorstore::dtypes::float64_t>,
dtype_v<::tensorstore::dtypes::complex64_t>,
dtype_v<::tensorstore::dtypes::complex128_t>,
};
for (auto dtype : kSupportedDataTypes) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_zarr_dtype,
ChooseBaseDType(dtype));
EXPECT_EQ(dtype, base_zarr_dtype.dtype);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto parsed, ParseBaseDType(base_zarr_dtype.encoded_dtype));
EXPECT_EQ(dtype, parsed.dtype);
EXPECT_EQ(base_zarr_dtype.endian, parsed.endian);
EXPECT_EQ(base_zarr_dtype.flexible_shape, parsed.flexible_shape);
EXPECT_EQ(base_zarr_dtype.encoded_dtype, parsed.encoded_dtype);
}
}
TEST(ChooseBaseDTypeTest, Invalid) {
struct X {};
EXPECT_THAT(ChooseBaseDType(dtype_v<X>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: .*"));
EXPECT_THAT(ChooseBaseDType(dtype_v<::tensorstore::dtypes::string_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: string"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/dtype.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/dtype_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2d57c5af-e081-4997-99fd-8108d330293d | cpp | google/tensorstore | zstd_compressor | tensorstore/internal/compression/zstd_compressor.cc | tensorstore/driver/n5/zstd_compressor_test.cc | #include "tensorstore/internal/compression/zstd_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/writer.h"
#include "riegeli/zstd/zstd_reader.h"
#include "riegeli/zstd/zstd_writer.h"
namespace tensorstore {
namespace internal {
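// Wraps `base_writer` in a riegeli::ZstdWriter at the configured `level`.
// `element_bytes` is unused by this codec, and the reader side needs no
// options beyond the riegeli defaults.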
std::unique_ptr<riegeli::Writer> ZstdCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::ZstdWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> ZstdCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::ZstdReader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_n5::Compressor;
TEST(ZstdCompressorTest, SmallRoundtrip) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 6}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST(ZstdCompressorTest, DefaultLevel) {
auto compressor1 = Compressor::FromJson({{"type", "zstd"}}).value();
auto compressor2 =
Compressor::FromJson({{"type", "zstd"}, {"level", 1}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor1->Encode(input, &encode_result1, 1));
TENSORSTORE_ASSERT_OK(compressor2->Encode(input, &encode_result2, 1));
EXPECT_EQ(encode_result1, encode_result2);
}
TEST(ZstdCompressorTest, NonDefaultLevel) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 9}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST(ZstdCompressorTest, InvalidParameter) {
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", "6"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", -131073}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", 23}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"foo", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Object includes extra members: \"foo\""));
}
TEST(ZstdCompressorTest, ToJson) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 5}}).value();
EXPECT_EQ(nlohmann::json({{"type", "zstd"}, {"level", 5}}),
compressor.ToJson());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zstd_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/zstd_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
807f7079-32eb-4aab-bf79-500ab8f1a70c | cpp | google/tensorstore | gzip_compressor | tensorstore/driver/n5/gzip_compressor.cc | tensorstore/driver/n5/gzip_compressor_test.cc | #include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/compressor_registry.h"
#include "tensorstore/internal/compression/zlib_compressor.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal_n5 {
namespace {
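// Registers the N5 "gzip" compressor, backed by ZlibCompressor.  Note the
// inverted JSON member: N5 expresses the framing as "useZlib", while the
// compressor stores `use_gzip_header`, so the GetterSetter below negates the
// value in both directions.  A "level" of -1 denotes the default.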
struct Registration {
Registration() {
using internal::ZlibCompressor;
namespace jb = tensorstore::internal_json_binding;
RegisterCompressor<ZlibCompressor>(
"gzip",
jb::Object(
jb::Member(
"level",
jb::Projection(
&ZlibCompressor::level,
jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](auto* v) { *v = -1; }, jb::Integer<int>(-1, 9)))),
jb::Member(
"useZlib",
jb::Projection(
&ZlibCompressor::use_gzip_header,
jb::GetterSetter(
[](bool use_gzip) { return !use_gzip; },
[](bool& use_gzip, bool use_zlib) {
use_gzip = !use_zlib;
},
jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](bool* use_zlib) { *use_zlib = false; }))))));
}
} registration;
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(GzipCompressionTest, Parse) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Compressor>({
{{{"type", "gzip"}},
{{"type", "gzip"}, {"level", -1}, {"useZlib", false}}},
{{{"type", "gzip"}, {"level", 3}},
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}}},
{{{"type", "gzip"}, {"useZlib", true}},
{{"type", "gzip"}, {"level", -1}, {"useZlib", true}}},
{
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}},
{{"type", "gzip"}, {"level", 3}, {"useZlib", false}},
},
});
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", -2}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"level", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"useZlib", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "gzip"}, {"extra", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GzipCompressionTest, Golden) {
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x1f, 0x8b, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x63, 0x60,
0x64, 0x60, 0x62, 0x60,
0x66, 0x60, 0x61, 0x60,
0x65, 0x60, 0x03, 0x00,
0xaa, 0xea, 0x6d, 0xbf,
0x0c, 0x00, 0x00, 0x00,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata,
N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression", {{"type", "gzip"}}}}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/gzip_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/gzip_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d01b82b0-bed4-4e1d-aee4-3f6f5100cdea | cpp | google/tensorstore | xz_compressor | tensorstore/internal/compression/xz_compressor.cc | tensorstore/internal/compression/xz_compressor_test.cc | #include "tensorstore/internal/compression/xz_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/xz/xz_writer.h"
namespace tensorstore {
namespace internal {
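// Wraps `base_writer` in a riegeli XzWriter configured from this
// compressor's `level`, `check`, and `extreme` fields; output is written in
// the .xz container format.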
std::unique_ptr<riegeli::Writer> XzCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::XzWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_container(Writer::Container::kXz);
options.set_check(static_cast<Writer::Check>(check));
options.set_compression_level(level);
options.set_extreme(extreme);
return std::make_unique<Writer>(std::move(base_writer), options);
}
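// Wraps `base_reader` in a riegeli XzReader. Container::kXzOrLzma accepts
// either the .xz or the legacy .lzma container, and set_concatenate(true)
// decodes concatenated streams as a single logical stream.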
std::unique_ptr<riegeli::Reader> XzCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_container(Reader::Container::kXzOrLzma);
options.set_concatenate(true);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}
} | #include "tensorstore/internal/compression/xz_compressor.h"
#include <cstddef>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include <lzma.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::XzCompressor;
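// The "abc"/"def" prefixes below verify that Encode and Decode append to the
// destination cord rather than replacing its existing contents.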
TEST(XzCompressorTest, SmallRoundtrip) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(compressor.Decode(
encode_result.Subcord(3, encode_result.size() - 3), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, SmallRoundtripFragmented) {
XzCompressor compressor;
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(compressor.Decode(
absl::MakeFragmentedCord(encode_result_fragments), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, LargeRoundtrip) {
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
XzCompressor compressor;
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(
compressor.Encode(absl::Cord(input), &encode_result, 0));
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultLevel) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.level = 9;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultCheck) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.check = LZMA_CHECK_CRC32;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, DecodeCorruptData) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(compressor.Decode(absl::Cord(corrupted), &decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
EXPECT_THAT(
compressor.Decode(encode_result.Subcord(0, encode_result.size() - 1),
&decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/xz_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/xz_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
37bb9b54-4a1c-427f-85d0-45be0da81ed5 | cpp | google/tensorstore | protobuf | tensorstore/internal/metrics/protobuf.cc | tensorstore/internal/metrics/protobuf_test.cc | #include "tensorstore/internal/metrics/protobuf.h"
#include <stdint.h>
#include <algorithm>
#include <string>
#include <tuple>
#include <variant>
#include "absl/log/absl_log.h"
namespace tensorstore {
namespace internal_metrics {
namespace {
void SetMetadata(const MetricMetadata& metadata,
metrics_proto::Metadata& proto) {
proto.set_description(metadata.description.data(),
metadata.description.size());
}
template <typename T>
void AddFields(const T& metric, metrics_proto::MetricInstance& proto) {
for (auto& x : metric.fields) proto.add_field(x);
}
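// Copies a CollectedMetric::Value into the proto, dispatching on the variant
// alternative held by `value`/`max_value`; entirely-monostate values are
// skipped.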
void AddValue(const CollectedMetric::Value& metric,
metrics_proto::MetricInstance& proto) {
if (std::holds_alternative<std::monostate>(metric.value) &&
std::holds_alternative<std::monostate>(metric.max_value)) {
return;
}
AddFields(metric, proto);
if (std::holds_alternative<double>(metric.value) ||
std::holds_alternative<double>(metric.max_value)) {
auto* dest = proto.mutable_double_value();
if (std::holds_alternative<double>(metric.value)) {
dest->set_value(std::get<double>(metric.value));
}
if (std::holds_alternative<double>(metric.max_value)) {
dest->set_max_value(std::get<double>(metric.max_value));
}
} else if (std::holds_alternative<int64_t>(metric.value) ||
std::holds_alternative<int64_t>(metric.max_value)) {
auto* dest = proto.mutable_int_value();
if (std::holds_alternative<int64_t>(metric.value)) {
dest->set_value(std::get<int64_t>(metric.value));
}
if (std::holds_alternative<int64_t>(metric.max_value)) {
dest->set_max_value(std::get<int64_t>(metric.max_value));
}
} else if (std::holds_alternative<std::string>(metric.value)) {
auto* dest = proto.mutable_string_value();
dest->set_value(std::get<std::string>(metric.value));
} else {
ABSL_LOG(FATAL) << "Unsupported value";
}
}
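// Copies a CollectedMetric::Histogram into the proto. Runs of empty buckets
// are run-length encoded as a single negative count to keep the serialized
// form compact.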
void AddHistogram(const CollectedMetric::Histogram& metric,
metrics_proto::MetricInstance& proto) {
AddFields(metric, proto);
auto* hist = proto.mutable_histogram();
hist->set_count(metric.count);
hist->set_mean(metric.mean);
if (metric.count > 1) {
hist->set_sum_of_squared_deviation(metric.sum_of_squared_deviation);
}
int n_zeros = 0;
for (auto x : metric.buckets) {
if (x == 0) {
n_zeros++;
} else {
if (n_zeros > 0) hist->add_bucket(-n_zeros);
n_zeros = 0;
hist->add_bucket(x);
}
}
}
}
void CollectedMetricToProto(const CollectedMetric& metric,
metrics_proto::Metric& proto) {
proto.set_metric_name(metric.metric_name.data(), metric.metric_name.size());
proto.set_tag(metric.tag.data(), metric.tag.size());
for (auto& x : metric.field_names) {
proto.add_field_name(x.data(), x.size());
}
SetMetadata(metric.metadata, *proto.mutable_metadata());
for (auto& x : metric.values) {
AddValue(x, *proto.add_instance());
}
for (auto& x : metric.histograms) {
AddHistogram(x, *proto.add_instance());
}
}
void CollectedMetricToProtoCollection(span<const CollectedMetric> metrics,
metrics_proto::MetricCollection& proto) {
for (auto& metric : metrics) {
CollectedMetricToProto(metric, *proto.add_metric());
}
}
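// Sorts metrics by name and each metric's instances by field values (then by
// value type) so that collections compare deterministically, e.g. in
// golden-proto tests.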
void SortProtoCollection(metrics_proto::MetricCollection& proto) {
std::sort(
proto.mutable_metric()->pointer_begin(),
proto.mutable_metric()->pointer_end(),
[](const metrics_proto::Metric* p1, const metrics_proto::Metric* p2) {
return p1->metric_name() < p2->metric_name();
});
for (int i = 0; i < proto.metric_size(); i++) {
auto& metric = *proto.mutable_metric(i);
std::sort(
metric.mutable_instance()->pointer_begin(),
metric.mutable_instance()->pointer_end(),
[](const metrics_proto::MetricInstance* p1,
const metrics_proto::MetricInstance* p2) {
int n = std::min(p1->field_size(), p2->field_size());
for (int i = 0; i < n; i++) {
if (p1->field(i) != p2->field(i)) {
return p1->field(i) < p2->field(i);
}
}
return std::make_tuple(p1->field_size(), p1->has_int_value(),
p1->has_double_value(), p1->has_string_value(),
p1->has_histogram(),
reinterpret_cast<uintptr_t>(p1)) <
std::make_tuple(p2->field_size(), p2->has_int_value(),
p2->has_double_value(), p2->has_string_value(),
p2->has_histogram(),
reinterpret_cast<uintptr_t>(p2));
});
}
}
}
} | #ifndef TENSORSTORE_METRICS_DISABLED
#include "tensorstore/internal/metrics/protobuf.h"
#include <stdint.h>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/gauge.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/metrics/metrics.pb.h"
#include "tensorstore/internal/metrics/registry.h"
#include "tensorstore/internal/metrics/value.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::Approximately;
using ::protobuf_matchers::EqualsProto;
using ::protobuf_matchers::IgnoringRepeatedFieldOrdering;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::Counter;
using ::tensorstore::internal_metrics::DefaultBucketer;
using ::tensorstore::internal_metrics::Gauge;
using ::tensorstore::internal_metrics::GetMetricRegistry;
using ::tensorstore::internal_metrics::Histogram;
using ::tensorstore::internal_metrics::MetricMetadata;
using ::tensorstore::internal_metrics::Value;
TEST(ProtobufTest, BasicConversion) {
CollectedMetric metric;
metric.metric_name = "abc";
metric.tag = "tag";
metric.values.emplace_back(
CollectedMetric::Value{{"c", "d"}, int64_t{1}, int64_t{2}});
metric.values.emplace_back(CollectedMetric::Value{{"e", "g"}, 2.3, 3.4});
metric.values.emplace_back(CollectedMetric::Value{{}, int64_t{1}});
metric.values.emplace_back(CollectedMetric::Value{{"i"}, 1.2});
metric.values.emplace_back(CollectedMetric::Value{{}, "boo"});
metric.histograms.emplace_back(CollectedMetric::Histogram{
{"h"}, 10, 1, 1, {1, 1, 1, 1, 1}});
tensorstore::metrics_proto::Metric proto;
tensorstore::internal_metrics::CollectedMetricToProto(metric, proto);
EXPECT_THAT(proto,
IgnoringRepeatedFieldOrdering(Approximately(EqualsProto(R"pb(
metric_name: "abc"
tag: "tag"
metadata {}
instance {
field: "c"
field: "d"
int_value { value: 1 max_value: 2 }
}
instance {
field: "e"
field: "g"
double_value { value: 2.3 max_value: 3.4 }
}
instance { int_value { value: 1 } }
instance {
field: "i"
double_value { value: 1.2 }
}
instance { string_value { value: "boo" } }
instance {
field: "h"
histogram {
count: 10
mean: 1
sum_of_squared_deviation: 1
bucket: 1
bucket: 1
bucket: 1
bucket: 1
bucket: 1
}
}
)pb"))));
}
TEST(ProtobufTest, FromRegistry) {
{
auto& counter = Counter<int64_t>::New("/protobuf_test/counter1",
MetricMetadata("A metric"));
counter.Increment();
counter.IncrementBy(2);
}
{
auto& counter = Counter<double>::New("/protobuf_test/counter2",
MetricMetadata("A metric"));
counter.Increment();
counter.IncrementBy(2);
}
{
auto& counter = Counter<int64_t, std::string>::New(
"/protobuf_test/counter3", "field1", MetricMetadata("A metric"));
counter.Increment("a");
counter.IncrementBy(2, "b");
}
{
auto& counter = Counter<double, int>::New(
"/protobuf_test/counter4", "field1", MetricMetadata("A metric"));
counter.Increment(1);
counter.IncrementBy(2, 2);
}
{
auto& gauge = Gauge<int64_t>::New("/protobuf_test/gauge1",
MetricMetadata("A metric"));
gauge.Set(3);
gauge.Increment();
gauge.IncrementBy(2);
}
{
auto& gauge =
Gauge<double>::New("/protobuf_test/gauge2", MetricMetadata("A metric"));
gauge.Set(3);
gauge.Increment();
gauge.IncrementBy(2);
}
{
auto& gauge = Gauge<int64_t, std::string>::New(
"/protobuf_test/gauge3", "field1", MetricMetadata("A metric"));
gauge.Increment("a");
gauge.IncrementBy(2, "a");
gauge.Set(3, "b");
}
{
auto& gauge = Gauge<double, bool>::New("/protobuf_test/gauge4", "field1",
MetricMetadata("A metric"));
gauge.Increment(false);
gauge.IncrementBy(2, false);
gauge.Set(3, true);
}
{
auto& histogram = Histogram<DefaultBucketer>::New(
"/protobuf_test/hist1", MetricMetadata("A metric"));
histogram.Observe(1);
histogram.Observe(2);
histogram.Observe(1000);
}
{
auto& histogram = Histogram<DefaultBucketer, int>::New(
"/protobuf_test/hist2", "field1", MetricMetadata("A metric"));
histogram.Observe(-1.0, 1);
histogram.Observe(0.11, 2);
histogram.Observe(1.2, 3);
histogram.Observe(2.1, 4);
}
{
auto& value = Value<int64_t>::New("/protobuf_test/value1",
MetricMetadata("A metric"));
value.Set(3);
}
{
auto& gauge = Value<std::string>::New("/protobuf_test/value2",
MetricMetadata("A metric"));
gauge.Set("foo");
}
tensorstore::metrics_proto::MetricCollection metric;
tensorstore::internal_metrics::CollectedMetricToProtoCollection(
GetMetricRegistry().CollectWithPrefix("/protobuf_test"), metric);
tensorstore::internal_metrics::SortProtoCollection(metric);
EXPECT_THAT(metric, Approximately(EqualsProto(R"pb(
metric {
metric_name: "/protobuf_test/counter1"
tag: "counter"
metadata { description: "A metric" }
instance { int_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/counter2"
tag: "counter"
metadata { description: "A metric" }
instance { double_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/counter3"
tag: "counter"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "a"
int_value { value: 1 }
}
instance {
field: "b"
int_value { value: 2 }
}
}
metric {
metric_name: "/protobuf_test/counter4"
tag: "counter"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "1"
double_value { value: 1 }
}
instance {
field: "2"
double_value { value: 2 }
}
}
metric {
metric_name: "/protobuf_test/gauge1"
tag: "gauge"
metadata { description: "A metric" }
instance { int_value { value: 6 max_value: 6 } }
}
metric {
metric_name: "/protobuf_test/gauge2"
tag: "gauge"
metadata { description: "A metric" }
instance { double_value { value: 6 max_value: 6 } }
}
metric {
metric_name: "/protobuf_test/gauge3"
tag: "gauge"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "a"
int_value { value: 3 max_value: 3 }
}
instance {
field: "b"
int_value { value: 3 max_value: 3 }
}
}
metric {
metric_name: "/protobuf_test/gauge4"
tag: "gauge"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "0"
double_value { value: 3 max_value: 3 }
}
instance {
field: "1"
double_value { value: 3 max_value: 3 }
}
}
metric {
metric_name: "/protobuf_test/hist1"
tag: "default_histogram"
metadata { description: "A metric" }
instance {
histogram {
count: 3
mean: 334.33333333333331
sum_of_squared_deviation: 664668.66666666674
bucket: -2
bucket: 1
bucket: 1
bucket: -7
bucket: 1
}
}
}
metric {
metric_name: "/protobuf_test/hist2"
tag: "default_histogram"
field_name: "field1"
metadata { description: "A metric" }
instance {
field: "1"
histogram { count: 1 mean: -1 bucket: 1 }
}
instance {
field: "2"
histogram { count: 1 mean: 0.11 bucket: -1 bucket: 1 }
}
instance {
field: "3"
histogram { count: 1 mean: 1.2 bucket: -2 bucket: 1 }
}
instance {
field: "4"
histogram { count: 1 mean: 2.1 bucket: -3 bucket: 1 }
}
}
metric {
metric_name: "/protobuf_test/value1"
tag: "value"
metadata { description: "A metric" }
instance { int_value { value: 3 } }
}
metric {
metric_name: "/protobuf_test/value2"
tag: "value"
metadata { description: "A metric" }
instance { string_value { value: "foo" } }
}
)pb")));
}
}
#endif | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/protobuf.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/protobuf_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e235071c-7a31-4583-bb84-dc3a97a962d3 | cpp | google/tensorstore | function | tensorstore/serialization/function.cc | tensorstore/serialization/function_test.cc | #include "tensorstore/serialization/function.h"
#include <string_view>
#include <typeinfo>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
bool NonSerializableFunctionBase::Encode(EncodeSink& sink) const {
sink.Fail(internal_serialization::NonSerializableError());
return false;
}
void NonSerializableFunctionBase::GarbageCollectionVisit(
    garbage_collection::GarbageCollectionVisitor& visitor) const {
  // No-op: a non-serializable function exposes no garbage-collected
  // references to visit.
}
using SerializableFunctionRegistry =
internal::HeterogeneousHashSet<const RegisteredSerializableFunction*,
RegisteredSerializableFunction::Key,
&RegisteredSerializableFunction::key>;
SerializableFunctionRegistry& GetSerializableFunctionRegistry() {
static absl::NoDestructor<SerializableFunctionRegistry> registry;
return *registry;
}
void RegisterSerializableFunction(const RegisteredSerializableFunction& r) {
if (!GetSerializableFunctionRegistry().insert(&r).second) {
ABSL_LOG(FATAL) << "Duplicate SerializableFunction registration: id="
<< r.id << ", signature=" << r.signature->name();
}
}
SerializableFunctionBase::~SerializableFunctionBase() = default;
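// Decodes a serialized function by reading its registration id and looking up
// the (signature, id) pair among functions previously registered via
// RegisterSerializableFunction.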
bool DecodeSerializableFunction(DecodeSource& source,
SerializableFunctionBase::Ptr& value,
const std::type_info& signature) {
std::string_view id;
if (!serialization::Decode(source, id)) return false;
auto& registry = GetSerializableFunctionRegistry();
auto it = registry.find(RegisteredSerializableFunction::Key(signature, id));
if (it == registry.end()) {
source.Fail(absl::DataLossError(
tensorstore::StrCat("SerializableFunction not registered: ", id)));
return false;
}
return (*it)->decode(source, value);
}
}
}
} | #include "tensorstore/serialization/function.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::serialization::BindFront;
using ::tensorstore::serialization::NonSerializable;
using ::tensorstore::serialization::SerializableFunction;
using ::tensorstore::serialization::SerializationRoundTrip;
TEST(SerializationTest, Function) {
SerializableFunction<int()> func([] { return 3; });
EXPECT_EQ(3, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(3, func_decoded());
}
TEST(SerializationTest, BindFront) {
SerializableFunction<int()> func =
BindFront([](int a, int b) { return a + b; }, 2, 5);
EXPECT_EQ(7, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(7, func_decoded());
}
TEST(SerializationTest, NonSerializable) {
SerializableFunction<int()> func = NonSerializable{[y = 5] { return y; }};
EXPECT_EQ(5, func());
EXPECT_THAT(SerializationRoundTrip(func),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Serialization not supported.*"));
}
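// A function object with a static `id` member is registered under that stable
// name; as the Id test below verifies, the encoded representation is simply
// the id string.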
struct FunctionWithId1 {
constexpr static const char id[] = "my_test_function1";
int operator()() const { return 1; }
};
struct FunctionWithId2 {
constexpr static const char id[] = "my_test_function2";
int operator()() const { return 2; }
};
TEST(SerializationTest, Id) {
SerializableFunction<int()> func1 = FunctionWithId1{};
SerializableFunction<int()> func2 = FunctionWithId2{};
EXPECT_EQ(1, func1());
EXPECT_EQ(2, func2());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func1_copy,
SerializationRoundTrip(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func2_copy,
SerializationRoundTrip(func2));
EXPECT_EQ(1, func1_copy());
EXPECT_EQ(2, func2_copy());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto func1_encoded, tensorstore::serialization::EncodeBatch(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_func1_encoded,
tensorstore::serialization::EncodeBatch(
std::string_view(FunctionWithId1::id)));
EXPECT_EQ(expected_func1_encoded, func1_encoded);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/function.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/function_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
92726b70-ffc0-4529-89cd-67ffa266f16e | cpp | google/tensorstore | json | tensorstore/index_space/json.cc | tensorstore/index_space/json_test.cc | #include "tensorstore/index_space/json.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/json/array.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/array.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal_index_space::BuilderFlags;
using ::tensorstore::internal_index_space::OutputIndexMapInitializer;
using ::tensorstore::internal_index_space::TransformRep;
struct DomainJsonKeys {
const char* rank;
const char* inclusive_min;
const char* inclusive_max;
const char* shape;
const char* exclusive_max;
const char* labels;
};
constexpr DomainJsonKeys kIndexDomainJsonKeys = {
"rank", "inclusive_min", "inclusive_max",
"shape", "exclusive_max", "labels",
};
constexpr DomainJsonKeys kIndexTransformJsonKeys = {
"input_rank", "input_inclusive_min", "input_inclusive_max",
"input_shape", "input_exclusive_max", "input_labels",
};
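// Binds a (value, is_implicit) pair: a bound wrapped in a single-element JSON
// array (e.g. [8] rather than 8) marks the bound as implicit.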
template <typename ElementBinder>
struct ImplicitPairBinder {
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementBinder element_binder;
template <typename Options, typename Obj>
absl::Status operator()(std::true_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json* j) const {
auto&& [element, is_implicit] = *obj;
if (const auto* k = j->get_ptr<const ::nlohmann::json::array_t*>()) {
if (k->size() != 1) {
return internal_json::ExpectedError(
*k, "array of size 1 indicating an implicit value");
}
is_implicit = true;
return element_binder(is_loading, options, &element, &(*k)[0]);
} else {
is_implicit = false;
return element_binder(is_loading, options, &element, j);
}
}
template <typename Options, typename Obj>
absl::Status operator()(std::false_type is_loading, const Options& options,
const Obj* obj, ::nlohmann::json* j) const {
auto&& [element, is_implicit] = *obj;
if (is_implicit) {
::nlohmann::json::array_t k(1);
TENSORSTORE_RETURN_IF_ERROR(
element_binder(is_loading, options, &element, &k[0]));
*j = std::move(k);
} else {
return element_binder(is_loading, options, &element, j);
}
return absl::OkStatus();
}
};
template <typename RankProjection, typename ValuesProjection,
typename ImplicitProjection, typename ElementBinder>
struct ImplicitArrayBinderImpl {
RankProjection rank_ptr;
ValuesProjection values_ptr;
ImplicitProjection implicit_ptr;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementBinder element_binder;
template <typename Loading, typename Options, typename Obj>
absl::Status operator()(Loading is_loading, const Options& options, Obj* obj,
::nlohmann::json* j) const {
return jb::OptionalArray(
[this](const auto& obj) { return std::invoke(values_ptr, obj).size(); },
[this](auto& obj, size_t size) {
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(size));
auto&& rank = std::invoke(rank_ptr, obj);
if (rank == dynamic_rank) {
rank = size;
} else if (rank != static_cast<DimensionIndex>(size)) {
return internal_json::JsonValidateArrayLength(size, rank);
}
std::invoke(values_ptr, obj).resize(size);
return absl::OkStatus();
},
[this](auto& obj, size_t i) {
auto& value = std::invoke(values_ptr, obj)[i];
auto implicit_value = std::invoke(implicit_ptr, obj)[i];
return std::pair<decltype(value), decltype(implicit_value)>(
value, implicit_value);
},
element_binder)(is_loading, options, obj, j);
}
};
template <typename T>
using InlinedVector = absl::InlinedVector<T, internal::kNumInlinedDims>;
struct TransformParserOutput {
Index offset = 0;
Index stride = 1;
std::optional<DimensionIndex> input_dimension;
IndexInterval index_array_bounds;
SharedArray<const Index, dynamic_rank> index_array;
};
struct TransformParserData {
IntervalForm interval_form = IntervalForm::half_open;
BuilderFlags flags{0};
DimensionIndex rank = dynamic_rank;
InlinedVector<Index> lower_bounds;
InlinedVector<Index> upper_bounds;
DimensionSet implicit_lower_bounds;
DimensionSet implicit_upper_bounds;
InlinedVector<std::string> labels;
std::optional<InlinedVector<TransformParserOutput>> output;
Result<TransformRep::Ptr<>> Finalize();
};
constexpr auto TransformParserOutputBinder = jb::Object(
jb::Member("offset",
jb::Projection(&TransformParserOutput::offset,
jb::DefaultValue([](Index* o) { *o = 0; }))),
jb::AtMostOne("input_dimension", "index_array"),
jb::Member("input_dimension",
jb::Projection(&TransformParserOutput::input_dimension,
jb::Optional())),
jb::OptionalMember(
"index_array",
jb::Projection(&TransformParserOutput::index_array, jb::NestedArray())),
jb::OptionalMember(
"index_array_bounds",
jb::Sequence(jb::Initialize([](auto* obj) {
if (!obj->index_array.data()) {
return absl::InvalidArgumentError(
"\"index_array_bounds\" is only valid with "
"\"index_array\"");
}
return absl::OkStatus();
}),
jb::Projection(&TransformParserOutput::index_array_bounds,
jb::DefaultValue(
[](auto* obj) {
*obj = IndexInterval::Infinite();
},
jb::IndexIntervalBinder)))),
jb::OptionalMember(
"stride",
jb::Sequence(
jb::Initialize([](auto* obj) {
if (!obj->input_dimension && !obj->index_array.data()) {
return absl::InvalidArgumentError(
"Either \"input_dimension\" or \"index_array\" must be "
"specified in "
"conjunction with \"stride\"");
}
return absl::OkStatus();
}),
jb::Projection(&TransformParserOutput::stride,
jb::DefaultValue([](Index* s) { *s = 1; }))))
);
template <typename T, typename ElementBinder>
constexpr auto LowerBoundsBinder(ElementBinder element_binder) {
using Binder = ImplicitPairBinder<absl::remove_cvref_t<ElementBinder>>;
auto rank_ptr = &T::rank;
auto value_ptr = &T::lower_bounds;
auto implicit_ptr = &T::implicit_lower_bounds;
return ImplicitArrayBinderImpl<decltype(rank_ptr), decltype(value_ptr),
decltype(implicit_ptr), Binder>{
std::move(rank_ptr), std::move(value_ptr), std::move(implicit_ptr),
Binder{std::move(element_binder)}};
}
template <typename T, typename ElementBinder>
constexpr auto UpperBoundsBinder(ElementBinder element_binder) {
using Binder = ImplicitPairBinder<absl::remove_cvref_t<ElementBinder>>;
auto rank_ptr = &T::rank;
auto value_ptr = &T::upper_bounds;
auto implicit_ptr = &T::implicit_upper_bounds;
return ImplicitArrayBinderImpl<decltype(rank_ptr), decltype(value_ptr),
decltype(implicit_ptr), Binder>{
std::move(rank_ptr), std::move(value_ptr), std::move(implicit_ptr),
Binder{std::move(element_binder)}};
}
constexpr auto IndexTransformParser(
bool is_transform, DimensionIndex input_rank_constraint = dynamic_rank) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
using T = TransformParserData;
auto* keys =
is_transform ? &kIndexTransformJsonKeys : &kIndexDomainJsonKeys;
DimensionIndex* rank = is_loading ? &obj->rank : nullptr;
return jb::Sequence(
jb::AtLeastOne(keys->rank, keys->inclusive_min, keys->shape,
keys->inclusive_max, keys->exclusive_max, keys->labels),
[=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (!is_loading) {
if (j->count(keys->inclusive_min) ||
j->count(keys->exclusive_max) || j->count(keys->labels)) {
return absl::OkStatus();
}
}
return jb::Member(
keys->rank,
jb::Projection(&T::rank,
jb::DefaultValue(
[](DimensionIndex* o) { *o = dynamic_rank; },
jb::Integer<DimensionIndex>(0, kMaxRank)))
)(is_loading, options, obj, j);
},
jb::OptionalMember(keys->inclusive_min,
jb::Sequence(LowerBoundsBinder<T>(
jb::BoundsBinder<-kInfIndex, 0>()),
jb::Initialize([](auto* obj) {
obj->flags |=
(BuilderFlags::kSetLower |
BuilderFlags::kSetImplicitLower);
}))),
jb::AtMostOne(keys->shape, keys->inclusive_max, keys->exclusive_max),
jb::OptionalMember(
keys->shape,
jb::LoadSave(jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfSize>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::sized;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
})))),
jb::OptionalMember(
keys->inclusive_max,
jb::LoadSave(jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfIndex>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::closed;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
})))),
jb::OptionalMember(
keys->exclusive_max,
jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfIndex + 1>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::half_open;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
}))),
jb::OptionalMember(
keys->labels,
jb::Projection(&T::labels, jb::DimensionLabelVector(rank))),
jb::Initialize([=](auto* obj) {
if (!RankConstraint::EqualOrUnspecified(input_rank_constraint,
obj->rank)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", keys->rank, " to be ", input_rank_constraint,
", but is: ", obj->rank));
}
return absl::OkStatus();
})
)(is_loading, options, obj, j);
};
}
constexpr auto IndexTransformOutputParser(
DimensionIndex output_rank_constraint = dynamic_rank) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
return jb::Sequence(
jb::Member("output", jb::Projection(&TransformParserData::output,
jb::Optional(jb::Array(
TransformParserOutputBinder)))),
jb::Initialize([=](auto* obj) {
if (obj->output) {
if (output_rank_constraint != dynamic_rank &&
obj->output->size() != output_rank_constraint) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected output rank to be ", output_rank_constraint,
", but is: ", obj->output->size()));
}
return absl::OkStatus();
}
const DimensionIndex rank = obj->rank;
if (output_rank_constraint != dynamic_rank &&
output_rank_constraint != rank) {
return absl::InvalidArgumentError("Missing \"output\" member");
}
return absl::OkStatus();
        }))(is_loading, options, obj, j);
};
}
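// Builds a TransformRep from the parsed fields. When no "output" member was
// specified, output maps default to the identity mapping over the input
// dimensions.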
Result<TransformRep::Ptr<>> TransformParserData::Finalize() {
if (!output) {
output.emplace(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
(*output)[i].input_dimension = i;
}
}
const DimensionIndex output_rank = output->size();
auto transform = TransformRep::Allocate(rank, output_rank);
transform->input_rank = rank;
transform->output_rank = output_rank;
if ((flags & BuilderFlags::kSetLower) != BuilderFlags::kDefault) {
std::copy(lower_bounds.begin(), lower_bounds.end(),
transform->input_origin().begin());
transform->implicit_lower_bounds = implicit_lower_bounds;
}
if ((flags & BuilderFlags::kSetUpper) != BuilderFlags::kDefault) {
std::copy(upper_bounds.begin(), upper_bounds.end(),
transform->input_shape().begin());
transform->implicit_upper_bounds = implicit_upper_bounds;
}
if (!labels.empty()) {
std::copy(labels.begin(), labels.end(), transform->input_labels().begin());
}
InlinedVector<OutputIndexMapInitializer> output_maps;
output_maps.reserve(output_rank);
auto maps = transform->output_index_maps();
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& out = (*output)[output_dim];
auto& map = maps[output_dim];
map.offset() = out.offset;
map.stride() = out.stride;
output_maps.emplace_back(
out.input_dimension
? OutputIndexMapInitializer(out.input_dimension.value())
: OutputIndexMapInitializer(out.index_array,
out.index_array_bounds));
}
TENSORSTORE_RETURN_IF_ERROR(SetOutputIndexMapsAndValidateTransformRep(
transform.get(), output_maps, interval_form, flags));
return transform;
}
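// Converts a domain to parser data for serialization. Bounds arrays that
// consist entirely of implicit infinite bounds are dropped so the emitted
// JSON stays minimal.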
TransformParserData MakeIndexDomainViewDataForSaving(IndexDomainView<> domain) {
const DimensionIndex rank = domain.rank();
TransformParserData tmp;
tmp.rank = rank;
tmp.lower_bounds.resize(rank);
tmp.upper_bounds.resize(rank);
tmp.labels.assign(domain.labels().begin(), domain.labels().end());
tmp.implicit_lower_bounds = domain.implicit_lower_bounds();
tmp.implicit_upper_bounds = domain.implicit_upper_bounds();
bool all_implicit_lower = true;
bool all_implicit_upper = true;
for (DimensionIndex i = 0; i < rank; ++i) {
tmp.lower_bounds[i] = domain[i].inclusive_min();
tmp.upper_bounds[i] = domain[i].exclusive_max();
all_implicit_lower = all_implicit_lower && tmp.implicit_lower_bounds[i] &&
(tmp.lower_bounds[i] == -kInfIndex);
all_implicit_upper = all_implicit_upper && tmp.implicit_upper_bounds[i] &&
(tmp.upper_bounds[i] == (+kInfIndex + 1));
}
if (all_implicit_lower) {
tmp.lower_bounds.resize(0);
}
if (all_implicit_upper) {
tmp.upper_bounds.resize(0);
}
return tmp;
}
TransformParserData MakeIndexTransformViewDataForSaving(
IndexTransformView<> transform) {
auto input_domain = transform.input_domain();
TransformParserData tmp = MakeIndexDomainViewDataForSaving(input_domain);
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
bool all_identity = (output_rank == input_rank);
tmp.output.emplace(output_rank);
auto maps = transform.output_index_maps();
for (DimensionIndex i = 0; i < output_rank; ++i) {
auto& output = (*tmp.output)[i];
const auto map = maps[i];
if (map.offset() != 0) {
output.offset = map.offset();
all_identity = false;
}
if (map.method() != OutputIndexMethod::constant && map.stride() != 1) {
output.stride = map.stride();
all_identity = false;
}
switch (map.method()) {
case OutputIndexMethod::constant:
all_identity = false;
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
output.input_dimension = input_dim;
if (input_dim != i) all_identity = false;
break;
}
case OutputIndexMethod::array: {
all_identity = false;
const auto index_array_data = map.index_array();
output.index_array = UnbroadcastArrayPreserveRank(
UnownedToShared(index_array_data.array_ref()));
IndexInterval index_range = index_array_data.index_range();
if (index_range != IndexInterval::Infinite() &&
!ValidateIndexArrayBounds(index_range, output.index_array).ok()) {
output.index_array_bounds = index_range;
}
break;
}
}
}
if (all_identity) {
tmp.output = std::nullopt;
}
return tmp;
}
}
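// Serializes a transform to JSON; an invalid (null) transform maps to the
// JSON `discarded` value.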
void to_json(::nlohmann::json& j,
IndexTransformView<> transform) {
if (!transform.valid()) {
j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return;
}
auto binder = jb::Object(IndexTransformParser(true),
IndexTransformOutputParser());
auto tmp = MakeIndexTransformViewDataForSaving(transform);
::nlohmann::json::object_t obj;
auto status = binder(std::false_type{}, IncludeDefaults{false}, &tmp, &obj);
status.IgnoreError();
assert(status.ok());
j = std::move(obj);
}
void to_json(::nlohmann::json& j,
IndexDomainView<> domain) {
if (!domain.valid()) {
j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return;
}
auto binder = jb::Object(IndexTransformParser(false));
auto tmp = MakeIndexDomainViewDataForSaving(domain);
::nlohmann::json::object_t obj;
auto status = binder(std::false_type{}, IncludeDefaults{false}, &tmp, &obj);
status.IgnoreError();
assert(status.ok());
j = std::move(obj);
}
void to_json(::nlohmann::json& j,
IndexInterval interval) {
auto status = jb::IndexIntervalBinder(std::false_type{},
IncludeDefaults{false}, &interval, &j);
status.IgnoreError();
assert(status.ok());
}
namespace internal_index_space {
Result<TransformRep::Ptr<>> ParseIndexTransformFromJson(
const ::nlohmann::json& j, DimensionIndex input_rank_constraint,
DimensionIndex output_rank_constraint) {
if (j.is_discarded()) return TransformRep::Ptr<>(nullptr);
auto result = [&]() -> Result<TransformRep::Ptr<>> {
auto binder = jb::Object(IndexTransformParser(true, input_rank_constraint),
IndexTransformOutputParser(output_rank_constraint)
);
TENSORSTORE_ASSIGN_OR_RETURN(auto parser_data,
jb::FromJson<TransformParserData>(j, binder));
return parser_data.Finalize();
}();
if (result) return result;
return MaybeAnnotateStatus(result.status(),
"Error parsing index transform from JSON");
}
Result<TransformRep::Ptr<>> ParseIndexDomainFromJson(
const ::nlohmann::json& j, DimensionIndex rank_constraint) {
if (j.is_discarded()) return TransformRep::Ptr<>(nullptr);
auto result = [&]() -> Result<TransformRep::Ptr<>> {
auto binder = jb::Object(IndexTransformParser(false, rank_constraint));
    TENSORSTORE_ASSIGN_OR_RETURN(auto parser_data,
                                 jb::FromJson<TransformParserData>(j, binder));
return parser_data.Finalize();
}();
if (result) return result;
return MaybeAnnotateStatus(result.status(),
"Error parsing index domain from JSON");
}
}
namespace internal_json_binding {
TENSORSTORE_DEFINE_JSON_BINDER(
ConstrainedRankJsonBinder,
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
if (j->is_discarded()) {
*obj = options.rank().rank;
return absl::OkStatus();
}
TENSORSTORE_RETURN_IF_ERROR(
Integer<DimensionIndex>(0, kMaxRank)(is_loading, options, obj, j));
} else {
if ((!IncludeDefaults(options).include_defaults() &&
options.rank().rank != dynamic_rank) ||
*obj == dynamic_rank) {
*j = ::nlohmann::json::value_t::discarded;
} else {
*j = *obj;
}
}
if (!RankConstraint::EqualOrUnspecified(options.rank().rank, *obj)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", options.rank().rank, ", but received: ", *obj));
}
return absl::OkStatus();
})
}
} | #include "tensorstore/index_space/json.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal::ParseJson;
IndexTransform<> MakeExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.input_labels({"x", "y", "z", "t"})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}))
.Finalize()
.value();
}
IndexTransform<> MakeUnlabeledExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}),
IndexInterval::Closed(1, 2))
.Finalize()
.value();
}
::nlohmann::json MakeUnlabeledExampleJson() {
return ParseJson(R"(
{
"input_inclusive_min": ["-inf", 7, ["-inf"], [8]],
"input_exclusive_max": ["+inf", 10, ["+inf"], [17]],
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{
"offset": 7,
"index_array": [[ [[1]], [[2]], [[3]] ]],
"index_array_bounds": [1, 2]
}
]
}
)");
}
::nlohmann::json MakeLabeledExampleJson() {
return ParseJson(R"(
{
"input_inclusive_min": ["-inf", 7, ["-inf"], [8]],
"input_exclusive_max": ["+inf", 10, ["+inf"], [17]],
"input_labels": ["x", "y", "z", "t"],
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{"offset": 7, "index_array": [[ [[1]], [[2]], [[3]] ]]}
]
}
)");
}
TEST(ToJsonTest, Unlabeled) {
EXPECT_EQ(MakeUnlabeledExampleJson(),
::nlohmann::json(MakeUnlabeledExampleTransform()));
}
TEST(ToJsonTest, Labeled) {
EXPECT_EQ(MakeLabeledExampleJson(), ::nlohmann::json(MakeExampleTransform()));
}
TEST(IndexTransformJsonBinderTest, IndexArrayOutOfBounds) {
tensorstore::TestJsonBinderRoundTrip<IndexTransform<>>({
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}},
}},
}},
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::UncheckedClosed(1, 2))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}, {"index_array_bounds", {1, 2}}},
}},
}},
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(
0, 0, 1, tensorstore::MakeArray<Index>({1, kInfIndex + 1, 3}))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, kInfIndex + 1, 3}}},
}},
}},
});
tensorstore::TestJsonBinderToJson<IndexTransform<>>({
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::Closed(1, 3))
.Finalize()
.value(),
::testing::Optional(MatchesJson(::nlohmann::json{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}},
}},
}))},
});
}
TEST(ToJsonTest, NullTransform) {
EXPECT_TRUE(::nlohmann::json(tensorstore::IndexTransform<>()).is_discarded());
}
TEST(ToJsonTest, IdentityTransform) {
EXPECT_EQ(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [4, 6]
}
)")
.dump(),
::nlohmann::json(tensorstore::IdentityTransform(
tensorstore::BoxView({1, 2}, {3, 4})))
.dump());
}
TEST(ToJsonTest, Translation) {
EXPECT_EQ(
::nlohmann::json({
{"input_inclusive_min", {1, 2}},
{"input_exclusive_max", {4, 6}},
{"output",
{
{{"offset", -1}, {"input_dimension", 0}},
{{"offset", -2}, {"input_dimension", 1}},
}},
}),
::nlohmann::json(ChainResult(tensorstore::IdentityTransform(
tensorstore::BoxView({3, 4})),
tensorstore::AllDims().TranslateTo({1, 2}))
.value()));
}
void TestRoundTripJson(const ::nlohmann::json& json) {
SCOPED_TRACE(json.dump());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto parsed,
tensorstore::ParseIndexTransform(json));
EXPECT_EQ(json, ::nlohmann::json(parsed));
}
TEST(RoundTripJsonTest, Labels) {
TestRoundTripJson({
{"input_inclusive_min", {1}},
{"input_exclusive_max", {3}},
});
TestRoundTripJson({
{"input_inclusive_min", {1}},
{"input_exclusive_max", {3}},
{"input_labels", {"x"}},
});
}
TEST(RoundTripJsonTest, Rank0) {
TestRoundTripJson({
{"input_rank", 0},
});
}
TEST(RoundTripJsonTest, Input1Output0) {
TestRoundTripJson({
{"input_rank", 1},
{"output", ::nlohmann::json::array_t()},
});
}
TEST(RoundTripJsonTest, LabelsOnly) {
TestRoundTripJson({
{"input_labels", {"x", "y", "z"}},
});
}
TEST(RoundTripJsonTest, MinOnlyNotImplicit) {
TestRoundTripJson({
{"input_inclusive_min", {"-inf"}},
});
}
TEST(RoundTripJsonTest, MaxOnlyNotImplicit) {
TestRoundTripJson({
{"input_exclusive_max", {"+inf"}},
});
}
TEST(ParseIndexTransformTest, Null) {
EXPECT_EQ(IndexTransform<>(),
tensorstore::ParseIndexTransform(
::nlohmann::json(::nlohmann::json::value_t::discarded)));
}
TEST(ParseIndexTransformTest, DynamicFromLabeled) {
EXPECT_EQ(MakeExampleTransform(),
tensorstore::ParseIndexTransform(MakeLabeledExampleJson()));
}
TEST(ParseIndexTransformTest, DynamicFromUnlabeled) {
EXPECT_EQ(MakeUnlabeledExampleTransform(),
tensorstore::ParseIndexTransform(MakeUnlabeledExampleJson()));
}
TEST(ParseIndexTransformTest, Static) {
auto t = tensorstore::ParseIndexTransform<4, 3>(MakeLabeledExampleJson());
static_assert(
std::is_same_v<decltype(t), Result<tensorstore::IndexTransform<4, 3>>>);
EXPECT_EQ(MakeExampleTransform(), t);
}
TEST(ParseIndexTransformTest, IdentityTransformExclusiveMax) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({5, kInfIndex + 1})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformInclusiveMax) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_inclusive_max({5, kInfIndex})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_inclusive_max": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformShape) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({5, kInfSize})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_shape": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformInputRank) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_rank": 2
}
)")));
}
TEST(ParseIndexTransformTest, StaticInputRankMismatch) {
EXPECT_THAT(
(tensorstore::ParseIndexTransform<3, 3>(MakeLabeledExampleJson())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Expected input_rank to be 3, but is: 4"));
}
TEST(ParseIndexTransformTest, StaticOutputRankMismatch) {
EXPECT_THAT(
(tensorstore::ParseIndexTransform<4, 2>(MakeLabeledExampleJson())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Expected output rank to be 2, but is: 3"));
}
TEST(ParseIndexTransformTest, MissingInputRank) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{"offset": 7, "index_array": [[ [[1]], [[2]], [[3]] ]]}
]
}
)")),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At least one of \"input_rank\", \"input_inclusive_min\", "
"\"input_shape\", \"input_inclusive_max\", \"input_exclusive_max\", "
"\"input_labels\" members must be specified"));
}
TEST(ParseIndexTransformTest, InvalidInputRank) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_rank": -3
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_rank\": "
"Expected integer .*, but received: -3"));
}
TEST(ParseIndexTransformTest, InvalidShape) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_shape": [1, 2, 3]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_shape\": "
"Array has length 3 but should have length 2"));
}
TEST(ParseIndexTransformTest, ExclusiveMaxAndInclusiveMax) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_inclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, ExclusiveMaxAndShape) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_shape": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, InclusiveMaxAndShape) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_inclusive_max": [5, 10],
"input_shape": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, MissingOutputs) {
auto json = ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10]
}
)");
EXPECT_EQ((tensorstore::IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_exclusive_max({5, 10})
.output_identity_transform()
.Finalize()
.value()),
(tensorstore::ParseIndexTransform<dynamic_rank, 2>(json)));
EXPECT_THAT((tensorstore::ParseIndexTransform<dynamic_rank, 3>(json)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Missing \"output\" member"));
}
TEST(ParseIndexTransformTest, InvalidInterval) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 11],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ParseIndexTransformTest, UnexpectedTopLevelMember) {
EXPECT_THAT((tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"extra": "value"
}
)"))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Object includes extra members: \"extra\""));
}
TEST(ParseIndexTransformTest, UnexpectedOutputMember) {
EXPECT_THAT((tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [2],
"output": [
{"extra": "value"}
]
}
)"))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Object includes extra members: \"extra\""));
}
TEST(ParseIndexTransformTest, InvalidLabel) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_labels": [1, 2]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_labels\": "
"Error parsing value at position 0: "
"Expected string, but received: 1"));
}
TEST(ParseIndexTransformTest, InvalidBound) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "a"],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_inclusive_min\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"-inf\", "
"but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidBoundPositiveInfinity) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "+inf"],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_inclusive_min\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"-inf\", "
"but received: \"\\+inf\""));
}
TEST(ParseIndexTransformTest, InvalidBoundNegativeInfinity) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "-inf"],
"input_exclusive_max": [5, "-inf"]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_exclusive_max\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"\\+inf\", "
"but received: \"-inf\""));
}
TEST(ParseIndexTransformTest, InvalidOutputOffset) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"offset": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"offset\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidOutputStride) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"stride": "a", "input_dimension": 0}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"stride\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, UnexpectedStride) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"stride": 1}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"stride\": "
"Either \"input_dimension\" or \"index_array\" must be "
"specified in conjunction with \"stride\""));
}
TEST(ParseIndexTransformTest, InvalidOutputInput) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"input_dimension": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"input_dimension\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidOutputArray) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"index_array": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"index_array\": "
"Error parsing array element at position \\{\\}: "
".* received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidOutputInputAndArray) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"input_dimension": 0, "index_array": [1]}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"At most one of \"input_dimension\", \"index_array\" "
"members is allowed"));
}
TEST(ParseIndexTransformTest, DuplicateLabels) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_labels": ["x", "x"]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_labels\": "
"Dimension label.*"));
}
TEST(IndexDomainJsonBinderTest, Simple) {
tensorstore::TestJsonBinderRoundTrip<tensorstore::IndexDomain<>>({
{tensorstore::IndexDomainBuilder<4>()
.origin({-kInfIndex, 7, -kInfIndex, 8})
.exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.labels({"x", "y", "z", "t"})
.Finalize()
.value(),
{
{"inclusive_min", {"-inf", 7, {"-inf"}, {8}}},
{"exclusive_max", {"+inf", 10, {"+inf"}, {17}}},
{"labels", {"x", "y", "z", "t"}},
}},
});
tensorstore::TestJsonBinderFromJson<tensorstore::IndexDomain<>>({
{{
{"rank", 33},
},
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"rank\": "
"Expected integer in the range \\[0, 32\\], but received: 33")},
{{
{"shape", {1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1}},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"shape\": "
"Rank 33 is outside valid range \\[0, 32\\]")},
{{
{"labels", {"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", ""}},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"labels\": "
"Rank 33 is outside valid range \\[0, 32\\]")},
{{
{"inclusive_min", {"-inf", 7, {"-inf"}, {8}}},
{"exclusive_max", {"+inf", 10, {"+inf"}, {17}}},
{"labels", {"x", "y", "z", "t"}},
{"output", "abc"},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Object includes extra members: \"output\"")},
});
}
TEST(IndexBinderTest, Basic) {
using ::tensorstore::kMaxFiniteIndex;
tensorstore::TestJsonBinderRoundTrip<Index>(
{
{-kInfIndex, "-inf"},
{+kInfIndex, "+inf"},
{-kMaxFiniteIndex, -kMaxFiniteIndex},
{0, 0},
{5, 5},
{+kMaxFiniteIndex, +kMaxFiniteIndex},
},
tensorstore::internal_json_binding::IndexBinder);
tensorstore::TestJsonBinderFromJson<Index>(
{
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument)},
{-kInfIndex, ::testing::Optional(MatchesJson(-kInfIndex))},
{+kInfIndex, ::testing::Optional(MatchesJson(+kInfIndex))},
{-kInfIndex - 1, MatchesStatus(absl::StatusCode::kInvalidArgument)},
{kInfIndex + 1, MatchesStatus(absl::StatusCode::kInvalidArgument)},
},
tensorstore::internal_json_binding::IndexBinder);
}
TEST(IndexIntervalBinderTest, Basic) {
using ::tensorstore::IndexInterval;
tensorstore::TestJsonBinderRoundTrip<IndexInterval>({
{IndexInterval::UncheckedClosed(5, 10), {5, 10}},
{IndexInterval(), {"-inf", "+inf"}},
{IndexInterval::UncheckedClosed(5, 4), {5, 4}},
{IndexInterval::UncheckedClosed(-kInfIndex, 20), {"-inf", 20}},
{IndexInterval::UncheckedClosed(20, +kInfIndex), {20, "+inf"}},
});
tensorstore::TestJsonBinderFromJson<IndexInterval>({
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument)},
{{-kInfIndex - 1, 10}, MatchesStatus(absl::StatusCode::kInvalidArgument)},
{{10, 5}, MatchesStatus(absl::StatusCode::kInvalidArgument)},
});
}
TEST(ConstrainedRankJsonBinderTest, RoundTripNoConstraintIncludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{5, 5},
{dynamic_rank, ::nlohmann::json::value_t::discarded},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder);
}
TEST(ConstrainedRankJsonBinderTest, RoundTripNoConstraintExcludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{5, 5},
{dynamic_rank, ::nlohmann::json::value_t::discarded},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::IncludeDefaults{false});
}
TEST(ConstrainedRankJsonBinderTest, RoundTripRankConstraintIncludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{30, 30},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{tensorstore::RankConstraint{30},
tensorstore::IncludeDefaults{true}},
tensorstore::RankConstraint{30});
}
TEST(ConstrainedRankJsonBinderTest, FromJsonRankConstraint) {
tensorstore::TestJsonBinderFromJson<DimensionIndex>(
{
{30, ::testing::Optional(30)},
{::nlohmann::json::value_t::discarded, ::testing::Optional(30)},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::RankConstraint{30});
}
TEST(ConstrainedRankJsonBinderTest, ToJsonRankConstraintIncludeDefaults) {
tensorstore::TestJsonBinderToJson<DimensionIndex>(
{
{30, ::testing::Optional(MatchesJson(30))},
{dynamic_rank, ::testing::Optional(MatchesJson(
::nlohmann::json::value_t::discarded))},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{
tensorstore::RankConstraint{30}, tensorstore::IncludeDefaults{true}});
}
TEST(ConstrainedRankJsonBinderTest, ToJsonRankConstraintExcludeDefaults) {
tensorstore::TestJsonBinderToJson<DimensionIndex>(
{
{30, ::testing::Optional(
MatchesJson(::nlohmann::json::value_t::discarded))},
{dynamic_rank, ::testing::Optional(MatchesJson(
::nlohmann::json::value_t::discarded))},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{tensorstore::IncludeDefaults{false},
tensorstore::RankConstraint{30}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/json.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/json_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
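// A minimal happy-path sketch for tensorstore::ParseIndexTransform, which the
// tests above only exercise through its error cases. Header and function names
// follow the test file; the identity-output default is an assumption.
#include <iostream>
#include <nlohmann/json.hpp>
#include "tensorstore/index_space/json.h"

int main() {
  ::nlohmann::json j{
      {"input_inclusive_min", {1, 2}},
      {"input_exclusive_max", {5, 10}},
  };
  auto transform = tensorstore::ParseIndexTransform(j);
  if (transform.ok()) {
    // With "output" omitted, the transform is assumed to default to the
    // identity output map.
    std::cout << *transform << "\n";
  }
  return 0;
}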
38258bcd-dd0a-45b3-b9e0-17ede1681162 | cpp | google/tensorstore | registry | tensorstore/internal/metrics/registry.cc | tensorstore/internal/metrics/registry_test.cc | #include "tensorstore/internal/metrics/registry.h"
#include <cassert>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/strings/match.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/metrics/collect.h"
namespace tensorstore {
namespace internal_metrics {
void MetricRegistry::AddInternal(std::string_view metric_name,
MetricRegistry::Metric m,
std::shared_ptr<void> hook) {
ABSL_CHECK(m) << metric_name;
absl::MutexLock l(&mu_);
ABSL_CHECK(
entries_.try_emplace(metric_name, Entry{std::move(m), std::move(hook)})
.second)
<< metric_name;
}
std::vector<CollectedMetric> MetricRegistry::CollectWithPrefix(
std::string_view prefix) {
std::vector<CollectedMetric> all;
all.reserve(entries_.size());
absl::MutexLock l(&mu_);
for (auto& kv : entries_) {
if (prefix.empty() || absl::StartsWith(kv.first, prefix)) {
auto opt_metric = kv.second.poly(CollectMetricTag{});
if (opt_metric.has_value()) {
all.emplace_back(*std::move(opt_metric));
assert(all.back().metric_name == kv.first);
}
}
}
for (auto& hook : collect_hooks_) {
hook(prefix, all);
}
return all;
}
std::optional<CollectedMetric> MetricRegistry::Collect(std::string_view name) {
absl::MutexLock l(&mu_);
auto it = entries_.find(name);
if (it == entries_.end()) return std::nullopt;
auto opt_metric = it->second.poly(CollectMetricTag{});
assert(!opt_metric.has_value() || opt_metric->metric_name == it->first);
return opt_metric;
}
MetricRegistry& GetMetricRegistry() {
static absl::NoDestructor<MetricRegistry> registry;
return *registry;
}
void MetricRegistry::Reset() {
absl::MutexLock l(&mu_);
for (auto& [k, v] : entries_) {
v.poly(ResetMetricTag{});
}
}
}
} | #include "tensorstore/internal/metrics/registry.h"
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
namespace {
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::MetricRegistry;
TEST(RegistryTest, Arbitrary) {
MetricRegistry registry;
registry.AddGeneric("/my/metric", [] {
CollectedMetric metric;
metric.metric_name = "/my/metric";
return metric;
});
registry.AddGeneric("/my/metric2", [] {
CollectedMetric metric;
metric.metric_name = "/my/metric2";
return metric;
});
EXPECT_FALSE(registry.Collect("/my/foo").has_value());
auto collected = registry.Collect("/my/metric");
ASSERT_TRUE(collected.has_value());
EXPECT_EQ("/my/metric", collected->metric_name);
auto all = registry.CollectWithPrefix("/my");
EXPECT_EQ(2, all.size());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/registry.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/registry_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
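// A minimal usage sketch for the registry API exercised above: metrics
// registered anywhere in the process can be collected from the global
// registry by name prefix. The "/tensorstore" prefix is illustrative.
#include <iostream>
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/metrics/registry.h"

void DumpMetricNames() {
  auto& registry = tensorstore::internal_metrics::GetMetricRegistry();
  for (const auto& metric : registry.CollectWithPrefix("/tensorstore")) {
    std::cout << metric.metric_name << "\n";
  }
}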
d7178847-5f31-485b-8ced-289d86dd036a | cpp | google/tensorstore | absl_time | tensorstore/serialization/absl_time.cc | tensorstore/internal/json_binding/absl_time_test.cc | #include "tensorstore/serialization/absl_time.h"
#include <cstdint>
#include <limits>
#include "absl/time/time.h"
#include "tensorstore/serialization/serialization.h"
namespace tensorstore {
namespace serialization {
bool Serializer<absl::Duration>::Encode(EncodeSink& sink,
const absl::Duration& value) {
int64_t rep_hi = absl::time_internal::GetRepHi(value);
uint32_t rep_lo = absl::time_internal::GetRepLo(value);
return serialization::EncodeTuple(sink, rep_hi, rep_lo);
}
bool Serializer<absl::Duration>::Decode(DecodeSource& source,
absl::Duration& value) {
int64_t rep_hi;
uint32_t rep_lo;
using absl::time_internal::kTicksPerSecond;
if (!serialization::DecodeTuple(source, rep_hi, rep_lo)) return false;
if (rep_lo >= kTicksPerSecond &&
(rep_lo != std::numeric_limits<uint32_t>::max() ||
(rep_hi != std::numeric_limits<int64_t>::min() &&
rep_hi != std::numeric_limits<int64_t>::max()))) {
source.Fail(serialization::DecodeError("Invalid time representation"));
return false;
}
value = absl::time_internal::MakeDuration(rep_hi, rep_lo);
return true;
}
bool Serializer<absl::Time>::Encode(EncodeSink& sink, const absl::Time& value) {
return serialization::Encode(sink, value - absl::UnixEpoch());
}
bool Serializer<absl::Time>::Decode(DecodeSource& source, absl::Time& value) {
absl::Duration d;
if (!serialization::Decode(source, d)) return false;
value = absl::UnixEpoch() + d;
return true;
}
}
} | #include "tensorstore/internal/json_binding/absl_time.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
TEST(AbslTimeJsonBinder, Roundtrips) {
const absl::TimeZone utc = absl::UTCTimeZone();
const absl::CivilSecond cs(2015, 2, 3, 4, 5, 6);
tensorstore::TestJsonBinderRoundTrip<absl::Time>(
{
{absl::FromCivil(cs, utc), "2015-02-03T04:05:06+00:00"},
{absl::FromCivil(absl::CivilMinute(cs), utc),
"2015-02-03T04:05:00+00:00"},
{absl::FromCivil(absl::CivilHour(cs), utc),
"2015-02-03T04:00:00+00:00"},
{absl::FromCivil(absl::CivilDay(cs), utc),
"2015-02-03T00:00:00+00:00"},
{absl::FromCivil(absl::CivilMonth(cs), utc),
"2015-02-01T00:00:00+00:00"},
{absl::FromCivil(absl::CivilYear(cs), utc),
"2015-01-01T00:00:00+00:00"},
},
jb::Rfc3339TimeBinder);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/absl_time.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/absl_time_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d2d45273-35c5-4952-a6d7-b819d01e7583 | cpp | google/tensorstore | data_type_endian_conversion | tensorstore/internal/data_type_endian_conversion.cc | tensorstore/internal/data_type_endian_conversion_test.cc | #include "tensorstore/internal/data_type_endian_conversion.h"
#include <cassert>
#include <complex>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
void EncodeArray(ArrayView<const void> source, ArrayView<void> target,
endian target_endian) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
internal::IterateOverStridedLayouts<2>(
{(target_endian == endian::native) ? functions.copy
: functions.swap_endian,
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{dtype.size(), dtype.size()}});
}
namespace {
static_assert(sizeof(bool) == 1);
struct DecodeBoolArray {
void operator()(unsigned char* source, bool* output, void*) const {
*output = static_cast<bool>(*source);
}
};
struct DecodeBoolArrayInplace {
void operator()(unsigned char* source, void*) const {
*source = static_cast<bool>(*source);
}
};
}
void DecodeArray(ArrayView<const void> source, endian source_endian,
ArrayView<void> target) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
if (dtype.id() != DataTypeId::bool_t) {
EncodeArray(source, target, source_endian);
return;
}
internal::IterateOverStridedLayouts<2>(
{SimpleElementwiseFunction<
DecodeBoolArray(unsigned char, bool), void*>(),
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{1, 1}});
}
void DecodeArray(SharedArrayView<void>* source, endian source_endian,
StridedLayoutView<> decoded_layout) {
assert(source != nullptr);
assert(absl::c_equal(source->shape(), decoded_layout.shape()));
const DataType dtype = source->dtype();
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
if ((reinterpret_cast<std::uintptr_t>(source->data()) % dtype->alignment) ==
0 &&
std::all_of(source->byte_strides().begin(), source->byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
const ElementwiseFunction<1, void*>* convert_func = nullptr;
if (dtype.id() == DataTypeId::bool_t) {
convert_func =
SimpleElementwiseFunction<DecodeBoolArrayInplace(unsigned char),
void*>();
} else if (source_endian != endian::native &&
functions.swap_endian_inplace) {
convert_func = functions.swap_endian_inplace;
}
if (convert_func) {
internal::IterateOverStridedLayouts<1>(
{convert_func,
nullptr},
nullptr, source->shape(), {{source->data()}},
{{source->byte_strides().data()}},
skip_repeated_elements, {{dtype.size()}});
}
} else {
*source = CopyAndDecodeArray(*source, source_endian, decoded_layout);
}
}
SharedArrayView<void> CopyAndDecodeArray(ArrayView<const void> source,
endian source_endian,
StridedLayoutView<> decoded_layout) {
SharedArrayView<void> target(
internal::AllocateAndConstructSharedElements(
decoded_layout.num_elements(), default_init, source.dtype()),
decoded_layout);
DecodeArray(source, source_endian, target);
return target;
}
SharedArrayView<const void> TryViewCordAsArray(const absl::Cord& source,
Index offset, DataType dtype,
endian source_endian,
StridedLayoutView<> layout) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
if (source_endian != endian::native && functions.swap_endian_inplace) {
return {};
}
auto maybe_flat = source.TryFlat();
if (!maybe_flat) {
return {};
}
ByteStridedPointer<const void> ptr = maybe_flat->data();
ptr += offset;
if ((reinterpret_cast<std::uintptr_t>(ptr.get()) % dtype->alignment) != 0 ||
!std::all_of(layout.byte_strides().begin(), layout.byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
return {};
}
auto shared_cord = std::make_shared<absl::Cord>(source);
if (auto shared_flat = shared_cord->TryFlat();
!shared_flat || shared_flat->data() != maybe_flat->data()) {
return {};
}
return SharedArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(shared_cord), ptr.get()),
dtype),
layout);
}
}
} | #include "tensorstore/internal/data_type_endian_conversion.h"
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::c_order;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::fortran_order;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::DecodeArray;
using ::tensorstore::internal::EncodeArray;
using ::tensorstore::internal::TryViewCordAsArray;
TEST(EncodeDecodeArrayTest, Uint8) {
uint8_t source[6] = {1, 2, 3, 4, 5, 6};
uint8_t dest1[6];
uint8_t dest2[6];
uint8_t dest3[6];
uint8_t dest4[6];
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest1, {2, 3}, fortran_order), endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest2, {2, 3}, fortran_order), endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::little,
Array(dest3, {2, 3}, fortran_order));
EXPECT_THAT(dest3, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::big,
Array(dest4, {2, 3}, fortran_order));
EXPECT_THAT(dest4, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
}
TEST(EncodeDecodeArrayTest, Uint16) {
uint16_t source[6] = {0x1234, 0x5678, 0x9012, 0x3456, 0x7890, 0x3344};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x34, 0x12, 0x56, 0x34,
0x78, 0x56, 0x90, 0x78,
0x12, 0x90, 0x44, 0x33}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({0x0,
0x12, 0x34, 0x34, 0x56,
0x56, 0x78, 0x78, 0x90,
0x90, 0x12, 0x33, 0x44}));
}
TEST(EncodeDecodeArrayTest, Float16) {
using ::tensorstore::dtypes::float16_t;
float16_t source[6] = {float16_t(1.0), float16_t(2.0), float16_t(3.0),
float16_t(4.0), float16_t(5.0), float16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x00, 0x3c,
0x00, 0x44,
0x00, 0x40,
0x00, 0x45,
0x00, 0x42,
0x00, 0x46}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3c, 0x00,
0x44, 0x00,
0x40, 0x00,
0x45, 0x00,
0x42, 0x00,
0x46, 0x00,
}));
}
TEST(EncodeDecodeArrayTest, Bfloat16) {
using ::tensorstore::dtypes::bfloat16_t;
bfloat16_t source[6] = {bfloat16_t(1.0), bfloat16_t(2.0), bfloat16_t(3.0),
bfloat16_t(4.0), bfloat16_t(5.0), bfloat16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({
0x0,
0x80, 0x3f,
0x80, 0x40,
0x00, 0x40,
0xa0, 0x40,
0x40, 0x40,
0xc0, 0x40,
}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3f, 0x80,
0x40, 0x80,
0x40, 0x00,
0x40, 0xa0,
0x40, 0x40,
0x40, 0xc0,
}));
}
TEST(DecodeArrayTest, Bool) {
unsigned char source[6] = {0x12, 0x00, 0x34, 0x1, 0x78, 0x00};
unsigned char dest[6];
DecodeArray(Array(reinterpret_cast<bool*>(source), {2, 3}, c_order),
endian::little,
Array(reinterpret_cast<bool*>(dest), {2, 3}, fortran_order));
EXPECT_THAT(dest, ::testing::ElementsAre(1, 1, 0, 1, 1, 0));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
TEST(DecodeArrayTest, Uint16InPlaceBigEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::big, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x1234, 0x5678, 0x9012},
{0x3456, 0x7890, 0x3344}}));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndianUnaligned) {
alignas(2) unsigned char source[13] = {0x00,
0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source + 1), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_NE(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), alt_layout);
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
void TestConvertCordInplace(DataType dtype, endian endian_value,
ContiguousLayoutOrder order,
bool expected_inplace) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype, ", order=", order,
", endian=", endian_value));
auto orig_array = tensorstore::AllocateArray(
{4, 5, 6}, order, tensorstore::default_init, dtype);
EXPECT_EQ(1, orig_array.pointer().use_count());
auto cord = absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(orig_array.data()),
dtype.size() * orig_array.num_elements()),
[owner = orig_array.pointer()](std::string_view s) {});
auto cord_array = TryViewCordAsArray(cord, 0, dtype, endian_value,
orig_array.layout());
if (expected_inplace) {
EXPECT_EQ(orig_array.data(), cord_array.data());
EXPECT_EQ(2, orig_array.pointer().use_count());
cord.Clear();
EXPECT_EQ(2, orig_array.pointer().use_count());
} else {
EXPECT_FALSE(cord_array.valid());
}
}
TEST(TryViewCordAsArrayTest, Inplace) {
const DataType data_types[] = {dtype_v<uint8_t>, dtype_v<uint16_t>,
dtype_v<uint32_t>, dtype_v<uint64_t>};
for (auto dtype : data_types) {
for (auto order : {tensorstore::c_order, tensorstore::fortran_order}) {
TestConvertCordInplace(dtype, endian::native, order,
true);
}
}
constexpr endian non_native =
endian::native == endian::little ? endian::big : endian::little;
TestConvertCordInplace(dtype_v<uint8_t>, non_native, tensorstore::c_order,
true);
TestConvertCordInplace(dtype_v<bool>, non_native, tensorstore::c_order,
true);
TestConvertCordInplace(dtype_v<uint32_t>, non_native, tensorstore::c_order,
false);
}
TEST(TryViewCordAsArrayTest, FlatCordBuilder) {
constexpr size_t kExtraBytes = 8;
tensorstore::internal::FlatCordBuilder builder(sizeof(uint32_t) * 3 * 4 * 5 +
kExtraBytes);
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
char* data_ptr = builder.data();
auto cord = std::move(builder).Build();
for (size_t offset = 0; offset < kExtraBytes; ++offset) {
auto array = TryViewCordAsArray(cord, offset, dtype_v<uint32_t>,
endian::native, layout);
if ((offset % alignof(uint32_t)) == 0) {
EXPECT_EQ(static_cast<void*>(data_ptr + offset), array.data());
EXPECT_EQ(layout, array.layout());
} else {
EXPECT_FALSE(array.valid());
}
}
}
TEST(TryViewCordAsArrayTest, Fragmented) {
std::vector<std::string> parts{
std::string(sizeof(uint32_t) * 3 * 3 * 5, '\0'),
std::string(sizeof(uint32_t) * 3 * 1 * 5, '\0')};
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
absl::Cord cord = absl::MakeFragmentedCord(parts);
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint32_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
TEST(TryViewCordAsArrayTest, SmallBuffer) {
StridedLayout<> layout(tensorstore::c_order, sizeof(uint8_t), {4});
absl::Cord cord("abcd");
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint8_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/data_type_endian_conversion.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/data_type_endian_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
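// A minimal sketch of DecodeArray as exercised above: converting a
// little-endian byte buffer into native-endian uint16 values. The byte values
// are illustrative.
#include <stdint.h>
#include "tensorstore/array.h"
#include "tensorstore/internal/data_type_endian_conversion.h"
#include "tensorstore/util/endian.h"

void DecodeLittleEndianExample() {
  alignas(2) unsigned char raw[4] = {0x34, 0x12, 0x78, 0x56};
  uint16_t decoded[2];
  tensorstore::internal::DecodeArray(
      tensorstore::Array(reinterpret_cast<uint16_t*>(raw), {2},
                         tensorstore::c_order),
      tensorstore::endian::little,
      tensorstore::Array(decoded, {2}, tensorstore::c_order));
  // decoded now holds {0x1234, 0x5678} regardless of host byte order.
}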
413701bb-1b33-4589-a48b-cb5756f4bc52 | cpp | google/tensorstore | storage_statistics | tensorstore/internal/storage_statistics.cc | tensorstore/driver/n5/storage_statistics_test.cc | #include "tensorstore/internal/storage_statistics.h"
#include <stdint.h>
#include <atomic>
#include <utility>
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal {
GetStorageStatisticsAsyncOperationState::
GetStorageStatisticsAsyncOperationState(
Future<ArrayStorageStatistics>& future,
const GetArrayStorageStatisticsOptions& options)
: options(options) {
auto p = PromiseFuturePair<ArrayStorageStatistics>::Make(std::in_place);
this->promise = std::move(p.promise);
future = std::move(p.future);
}
void GetStorageStatisticsAsyncOperationState::MaybeStopEarly() {
if (options.mask & ArrayStorageStatistics::query_not_stored) {
if (chunks_present.load() == 0) {
return;
}
}
if (options.mask & ArrayStorageStatistics::query_fully_stored) {
if (chunk_missing.load() == false) {
return;
}
}
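  // Every queried condition is already determined, so mark the promise as no
  // longer needing further work; the final statistics are filled in by
  // ~GetStorageStatisticsAsyncOperationState below.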
SetDeferredResult(promise, ArrayStorageStatistics{});
}
GetStorageStatisticsAsyncOperationState::
~GetStorageStatisticsAsyncOperationState() {
auto& r = promise.raw_result();
if (!r.ok()) return;
r->mask = options.mask;
int64_t num_present = chunks_present.load(std::memory_order_relaxed);
if (options.mask & ArrayStorageStatistics::query_not_stored) {
r->not_stored = (num_present == 0);
}
if (options.mask & ArrayStorageStatistics::query_fully_stored) {
r->fully_stored = num_present == total_chunks;
}
}
}
} | #include <gtest/gtest.h>
#include "tensorstore/driver/zarr/storage_statistics_test_util.h"
namespace {
using ::tensorstore::internal_zarr::ZarrLikeStorageStatisticsTest;
using ::tensorstore::internal_zarr::ZarrLikeStorageStatisticsTestParams;
INSTANTIATE_TEST_SUITE_P(N5StorageStatisticsTest, ZarrLikeStorageStatisticsTest,
::testing::Values(ZarrLikeStorageStatisticsTestParams{
"n5", '/'}),
::testing::PrintToStringParamName());
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/storage_statistics.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/storage_statistics_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
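// A minimal sketch of the public entry point this state object supports. The
// GetStorageStatistics call and option name are assumptions based on
// array_storage_statistics.h; `store` is an already-open TensorStore.
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/util/future.h"

tensorstore::Future<tensorstore::ArrayStorageStatistics> QueryNotStored(
    tensorstore::TensorStore<> store) {
  // Resolves early once any stored chunk is observed (see MaybeStopEarly).
  return tensorstore::GetStorageStatistics(
      store, tensorstore::ArrayStorageStatistics::query_not_stored);
}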
684a40ab-72e9-4819-b22d-86644fa5ade0 | cpp | google/tensorstore | decoded_matches | tensorstore/internal/decoded_matches.cc | tensorstore/internal/decoded_matches_test.cc | #include "tensorstore/internal/decoded_matches.h"
#include <functional>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
using DecodeFunction = std::function<Result<std::string>(std::string_view)>;
class Matcher : public ::testing::MatcherInterface<absl::Cord> {
public:
Matcher(::testing::Matcher<std::string_view> value_matcher,
DecodeFunction decoder)
: value_matcher_(std::move(value_matcher)),
decoder_(std::move(decoder)) {}
bool MatchAndExplain(
absl::Cord value,
::testing::MatchResultListener* listener) const override {
auto decoded = decoder_(value.Flatten());
if (!decoded.ok()) {
*listener << "Failed to decode value: " << decoded.status();
return false;
}
return value_matcher_.MatchAndExplain(*decoded, listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "when decoded ";
value_matcher_.DescribeTo(os);
}
private:
::testing::Matcher<std::string_view> value_matcher_;
DecodeFunction decoder_;
};
}
::testing::Matcher<absl::Cord> DecodedMatches(
::testing::Matcher<std::string_view> value_matcher,
DecodeFunction decoder) {
return ::testing::MakeMatcher(
new Matcher(std::move(value_matcher), std::move(decoder)));
}
}
} | #include "tensorstore/internal/decoded_matches.h"
#include <cstddef>
#include <sstream>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal::DecodedMatches;
tensorstore::Result<std::string> Stride2Decoder(std::string_view input) {
if (input.size() % 2 != 0) {
return absl::InvalidArgumentError("");
}
std::string output;
for (size_t i = 0; i < input.size(); i += 2) {
output += input[i];
}
return output;
}
TEST(DecodedMatchesTest, Describe) {
std::ostringstream ss;
DecodedMatches("x", Stride2Decoder).DescribeTo(&ss);
EXPECT_EQ("when decoded is equal to \"x\"", ss.str());
}
TEST(DecodedMatchesTest, ExplainValueMatcher) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
DecodedMatches(::testing::SizeIs(3), Stride2Decoder), absl::Cord("xy"),
&listener);
EXPECT_EQ("whose size 1 doesn't match", listener.str());
}
TEST(DecodedMatchesTest, ExplainDecodeError) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(DecodedMatches("x", Stride2Decoder),
absl::Cord("xyz"), &listener);
EXPECT_EQ("Failed to decode value: INVALID_ARGUMENT: ", listener.str());
}
TEST(DecodedMatchesTest, Matches) {
EXPECT_THAT(absl::Cord("abcd"), DecodedMatches("ac", Stride2Decoder));
EXPECT_THAT(absl::Cord("abc"),
::testing::Not(DecodedMatches("ac", Stride2Decoder)));
EXPECT_THAT(absl::Cord("abcd"),
::testing::Not(DecodedMatches("ab", Stride2Decoder)));
EXPECT_THAT(absl::Cord("abcd"),
DecodedMatches(::testing::Not("ab"), Stride2Decoder));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/decoded_matches.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/decoded_matches_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
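// A minimal sketch of using DecodedMatches with a custom decoder in a test.
// Identity() is a hypothetical stand-in for a real decoder (e.g. zlib); the
// matcher composes with any gmock string matcher, as the tests above show.
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/decoded_matches.h"
#include "tensorstore/util/result.h"

tensorstore::Result<std::string> Identity(std::string_view s) {
  return std::string(s);
}

TEST(DecodedMatchesUsage, ComposesWithStringMatchers) {
  EXPECT_THAT(absl::Cord("chunk-data"),
              tensorstore::internal::DecodedMatches(
                  ::testing::HasSubstr("chunk"), Identity));
}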
79f89674-bf24-4505-9db9-114f61502a06 | cpp | google/tensorstore | grid_chunk_key_ranges | tensorstore/internal/grid_chunk_key_ranges.cc | tensorstore/internal/grid_chunk_key_ranges_test.cc | #include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
absl::Status GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
const internal_grid_partition::IndexTransformGridPartition& grid_partition,
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
internal::OutputToGridCellFn output_to_grid_cell, BoxView<> grid_bounds,
const LexicographicalGridIndexKeyFormatter& key_formatter,
absl::FunctionRef<absl::Status(std::string key,
tensorstore::span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
Box<dynamic_rank(kMaxRank)> grid_bounds_copy(grid_bounds);
assert(grid_output_dimensions.size() == grid_bounds.rank());
DimensionIndex cached_min_grid_index_for_lexicographical_order_dim = -1;
Index cached_min_grid_index_for_lexicographical_order;
const auto get_min_grid_index_for_lexicographical_order =
[&](DimensionIndex dim) {
if (dim == cached_min_grid_index_for_lexicographical_order_dim) {
return cached_min_grid_index_for_lexicographical_order;
}
cached_min_grid_index_for_lexicographical_order_dim = dim;
return cached_min_grid_index_for_lexicographical_order =
key_formatter.MinGridIndexForLexicographicalOrder(
dim, grid_bounds[dim]);
};
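  // forward_bounds emits a single key for a one-cell box; otherwise it emits
  // a key range covering `bounds` in which the first `outer_prefix_rank` grid
  // dimensions are fixed.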
const auto forward_bounds =
[&](BoxView<> bounds, DimensionIndex outer_prefix_rank) -> absl::Status {
if (bounds.num_elements() == 1) {
return handle_key(key_formatter.FormatKey(bounds.origin()),
bounds.origin());
}
assert(outer_prefix_rank < bounds.rank());
if (bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return handle_key_range(KeyRange::Prefix(key_formatter.FormatKey(
bounds.origin().first(outer_prefix_rank))),
bounds);
}
DimensionIndex key_dims = outer_prefix_rank + 1;
Index inclusive_max_indices[kMaxRank];
for (DimensionIndex i = 0; i < key_dims; ++i) {
inclusive_max_indices[i] = bounds[i].inclusive_max();
}
return handle_key_range(
KeyRange(key_formatter.FormatKey(bounds.origin().first(key_dims)),
KeyRange::PrefixExclusiveMax(
key_formatter.FormatKey(tensorstore::span<const Index>(
&inclusive_max_indices[0], key_dims)))),
bounds);
};
const auto handle_interval = [&](BoxView<> bounds) -> absl::Status {
DimensionIndex outer_prefix_rank = 0;
while (outer_prefix_rank < bounds.rank() &&
bounds.shape()[outer_prefix_rank] == 1) {
++outer_prefix_rank;
}
if (outer_prefix_rank == bounds.rank() ||
bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
const Index min_index_for_lexicographical_order =
get_min_grid_index_for_lexicographical_order(outer_prefix_rank);
if (min_index_for_lexicographical_order <=
bounds.origin()[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
Box<dynamic_rank(kMaxRank)> new_bounds(bounds);
IndexInterval inner_interval = bounds[outer_prefix_rank];
while (!inner_interval.empty() && inner_interval.inclusive_min() <
min_index_for_lexicographical_order) {
new_bounds[outer_prefix_rank] =
IndexInterval::UncheckedSized(inner_interval.inclusive_min(), 1);
TENSORSTORE_RETURN_IF_ERROR(
forward_bounds(new_bounds, outer_prefix_rank + 1));
inner_interval = IndexInterval::UncheckedClosed(
inner_interval.inclusive_min() + 1, inner_interval.inclusive_max());
}
if (inner_interval.empty()) return absl::OkStatus();
new_bounds[outer_prefix_rank] = inner_interval;
return forward_bounds(new_bounds, inner_interval.size() == 1
? outer_prefix_rank + 1
: outer_prefix_rank);
};
return internal_grid_partition::GetGridCellRanges(
grid_partition, grid_output_dimensions, grid_bounds, output_to_grid_cell,
transform, handle_interval);
}
}
} | #include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::CeilOfRatio;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::KeyRange;
using ::tensorstore::kMaxRank;
using ::tensorstore::Result;
using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
using ::testing::Optional;
using R = std::tuple<KeyRange, Box<>>;
absl::Status GetChunkKeyRangesForRegularGridWithBase10Keys(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
tensorstore::span<const Index> chunk_shape,
tensorstore::span<const Index> shape, char dimension_separator,
absl::FunctionRef<absl::Status(std::string key,
tensorstore::span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
const DimensionIndex rank = grid_output_dimensions.size();
assert(rank == chunk_shape.size());
assert(rank == shape.size());
Box<dynamic_rank(kMaxRank)> grid_bounds(rank);
for (DimensionIndex i = 0; i < shape.size(); ++i) {
const Index grid_size = CeilOfRatio(shape[i], chunk_shape[i]);
grid_bounds[i] = IndexInterval::UncheckedSized(0, grid_size);
}
RegularGridRef grid{chunk_shape};
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, grid_partition));
return GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
grid_partition, transform, grid_output_dimensions, grid, grid_bounds,
Base10LexicographicalGridIndexKeyParser{rank, dimension_separator},
handle_key, handle_key_range);
}
Result<std::vector<R>> GetRanges(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
tensorstore::span<const Index> chunk_shape,
tensorstore::span<const Index> shape, char dimension_separator) {
std::vector<R> ranges;
const auto handle_key =
[&](std::string key,
tensorstore::span<const Index> grid_indices) -> absl::Status {
ranges.emplace_back(
KeyRange::Singleton(key),
Box<>(grid_indices, std::vector<Index>(grid_indices.size(), 1)));
return absl::OkStatus();
};
const auto handle_key_range = [&](KeyRange key_range,
BoxView<> grid_bounds) -> absl::Status {
ranges.emplace_back(std::move(key_range), grid_bounds);
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(GetChunkKeyRangesForRegularGridWithBase10Keys(
transform, grid_output_dimensions, chunk_shape, shape,
dimension_separator, handle_key, handle_key_range));
return ranges;
}
TEST(ChunkKeyRangesTest, Rank0) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(0, 0).Finalize().value(),
{}, {},
{}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("0"), {}})));
}
TEST(ChunkKeyRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange(), Box<>{{0}, {10}}})));
}
TEST(ChunkKeyRangesTest, Rank1Constrained) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange("1", KeyRange::PrefixExclusiveMax("7")),
Box<>{{1}, {7}}})));
}
TEST(ChunkKeyRangesTest, Rank1ConstrainedSplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({8})
.input_exclusive_max({13})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{1}},
{{20}}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("8"), Box<>{{8}, {1}}},
R{KeyRange::Singleton("9"), Box<>{{9}, {1}}},
R{KeyRange("10", KeyRange::PrefixExclusiveMax("12")),
Box<>{{10}, {3}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 10}},
{{25, 100}}, '/'),
Optional(
ElementsAre(R{KeyRange("1/0", KeyRange::PrefixExclusiveMax("1/3")),
Box<>{{1, 0}, {1, 4}}},
R{KeyRange("2/0", KeyRange::PrefixExclusiveMax("2/3")),
Box<>{{2, 0}, {1, 4}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 0})
.input_shape({8, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(ElementsAre(R{KeyRange("1/", KeyRange::PrefixExclusiveMax("2/")),
Box<>{{1, 0}, {2, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnlySplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({8, 0})
.input_shape({5, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{1, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange::Prefix("8/"), Box<>{{8, 0}, {1, 10}}},
R{KeyRange::Prefix("9/"), Box<>{{9, 0}, {1, 10}}},
R{KeyRange("10/", "120"), Box<>{{10, 0}, {3, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedSecondDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({25, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange("0/1", KeyRange::PrefixExclusiveMax("0/7")),
Box<>{{0, 1}, {1, 7}}},
R{KeyRange("1/1", KeyRange::PrefixExclusiveMax("1/7")),
Box<>{{1, 1}, {1, 7}}},
R{KeyRange("2/1", KeyRange::PrefixExclusiveMax("2/7")),
Box<>{{2, 1}, {1, 7}}},
R{KeyRange("3/1", KeyRange::PrefixExclusiveMax("3/7")),
Box<>{{3, 1}, {1, 7}}},
R{KeyRange("4/1", KeyRange::PrefixExclusiveMax("4/7")),
Box<>{{4, 1}, {1, 7}}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
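// A minimal sketch of the base-10 key formatter used by the helper above:
// grid cell indices map to zarr-like keys such as "1/2", which is what makes
// lexicographical kvstore key ranges line up with grid cell ranges.
#include <iostream>
#include "tensorstore/index.h"
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include "tensorstore/util/span.h"

void FormatKeyExample() {
  tensorstore::internal::Base10LexicographicalGridIndexKeyParser formatter{
      /*rank=*/2, /*dimension_separator=*/'/'};
  const tensorstore::Index cell[] = {1, 2};
  std::cout << formatter.FormatKey(cell) << "\n";  // Prints "1/2".
}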
7d0d2827-9665-4108-9bf1-a5f64ad67a81 | cpp | google/tensorstore | irregular_grid | tensorstore/internal/irregular_grid.cc | tensorstore/internal/irregular_grid_test.cc | #include "tensorstore/internal/irregular_grid.h"
#include <assert.h>
#include <stddef.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
IrregularGrid::IrregularGrid(std::vector<std::vector<Index>> inclusive_mins)
: shape_(inclusive_mins.size(), 0),
inclusive_mins_(std::move(inclusive_mins)) {
for (size_t i = 0; i < inclusive_mins_.size(); i++) {
std::sort(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
auto new_it =
std::unique(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
inclusive_mins_[i].resize(
std::distance(inclusive_mins_[i].begin(), new_it));
shape_[i] = inclusive_mins_[i].size() - 1;
}
}
Index IrregularGrid::operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const {
auto points = inclusive_min(dim);
auto it = std::upper_bound(points.begin(), points.end(), output_index);
Index cell = std::distance(points.begin(), it) - 1;
if (cell_bounds) {
if (cell < 0) {
*cell_bounds = IndexInterval::UncheckedHalfOpen(-kInfIndex, points[0]);
} else if (cell < points.size() - 1) {
*cell_bounds =
IndexInterval::UncheckedHalfOpen(points[cell], points[cell + 1]);
} else {
*cell_bounds = IndexInterval::UncheckedClosed(points[cell], kInfIndex);
}
}
return cell;
}
IrregularGrid IrregularGrid::Make(
tensorstore::span<const IndexDomain<>> domains) {
absl::InlinedVector<IndexDomainView<>, 16> views;
views.reserve(domains.size());
for (const auto& d : domains) views.push_back(d);
return Make(tensorstore::span(views));
}
IrregularGrid IrregularGrid::Make(
tensorstore::span<const IndexDomainView<>> domains) {
assert(!domains.empty());
DimensionIndex rank = domains[0].rank();
std::vector<std::vector<Index>> inclusive_mins;
inclusive_mins.resize(rank);
for (auto& d : domains) {
assert(d.rank() == rank);
for (DimensionIndex i = 0; i < rank; i++) {
if (inclusive_mins[i].empty() ||
inclusive_mins[i].back() != d[i].inclusive_min()) {
inclusive_mins[i].push_back(d[i].inclusive_min());
}
inclusive_mins[i].push_back(d[i].exclusive_max());
}
}
return IrregularGrid(std::move(inclusive_mins));
}
}
} | #include "tensorstore/internal/irregular_grid.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexInterval;
using ::tensorstore::kInfIndex;
using ::tensorstore::internal::IrregularGrid;
using ::testing::ElementsAre;
TEST(IrregularGridTest, Basic) {
std::vector<Index> dimension0{2, 0, -3};
std::vector<Index> dimension1{10, 45, 20, 30};
auto grid = IrregularGrid({dimension0, dimension1});
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, IndexDomain) {
const Index origin1[] = {-3, 10};
const Index shape1[] = {3, 10};
const Index origin2[] = {0, 20};
const Index shape2[] = {2, 10};
const Index origin3[] = {0, 30};
const Index shape3[] = {2, 15};
std::vector<IndexDomain<>> domains(
{IndexDomain<>{
BoxView<>{tensorstore::span(origin1), tensorstore::span(shape1)}},
IndexDomain<>{
BoxView<>{tensorstore::span(origin2), tensorstore::span(shape2)}},
IndexDomain<>{
BoxView<>{tensorstore::span(origin3), tensorstore::span(shape3)}}});
auto grid = IrregularGrid::Make(domains);
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, Rank0) {
std::vector<std::vector<Index>> inclusive_mins;
auto grid = IrregularGrid(inclusive_mins);
EXPECT_EQ(0, grid.rank());
EXPECT_TRUE(grid.shape().empty());
EXPECT_TRUE(grid.cell_origin({}).empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/irregular_grid.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/irregular_grid_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
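// A minimal sketch of cell lookup on an irregular grid, matching the tests
// above: boundaries {0, 10, 25} define cell 0 = [0, 10) and cell 1 = [10, 25),
// with cell -1 for output indices before the first boundary.
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/irregular_grid.h"

void CellLookupExample() {
  tensorstore::internal::IrregularGrid grid({{0, 10, 25}});
  tensorstore::IndexInterval bounds;
  tensorstore::Index cell = grid(/*dim=*/0, /*output_index=*/12, &bounds);
  // cell == 1 and bounds == [10, 25).
  (void)cell;
}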
d805150c-0a56-4080-b44d-3ffd094bf8ae | cpp | google/tensorstore | path | tensorstore/internal/path.cc | tensorstore/internal/path_test.cc | #include "tensorstore/internal/path.h"
#include <cstddef>
#include <initializer_list>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace {
#ifdef _WIN32
constexpr inline bool IsDirSeparator(char c) { return c == '\\' || c == '/'; }
#else
constexpr inline bool IsDirSeparator(char c) { return c == '/'; }
#endif
}
namespace tensorstore {
namespace internal_path {
std::string JoinPathImpl(std::initializer_list<std::string_view> paths) {
size_t s = 0;
for (std::string_view path : paths) {
s += path.size() + 1;
}
std::string result;
result.reserve(s);
for (std::string_view path : paths) {
internal::AppendPathComponent(result, path);
}
return result;
}
}
namespace internal {
std::pair<std::string_view, std::string_view> PathDirnameBasename(
std::string_view path) {
size_t pos = path.size();
while (pos != 0 && !IsDirSeparator(path[pos - 1])) {
--pos;
}
size_t basename = pos;
--pos;
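  // If `path` contains no separator before the basename, the unsigned
  // decrement wraps around to npos.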
if (pos == std::string_view::npos) {
return {"", path};
}
while (pos != 0 && IsDirSeparator(path[pos - 1])) {
--pos;
}
if (pos == 0) {
return {"/", path.substr(basename)};
}
return {path.substr(0, pos), path.substr(basename)};
}
void EnsureDirectoryPath(std::string& path) {
if (path.size() == 1 && path[0] == '/') {
path.clear();
} else if (!path.empty() && path.back() != '/') {
path += '/';
}
}
void EnsureNonDirectoryPath(std::string& path) {
size_t size = path.size();
while (size > 0 && path[size - 1] == '/') {
--size;
}
path.resize(size);
}
void AppendPathComponent(std::string& path, std::string_view component) {
if (!path.empty() && path.back() != '/' && !component.empty() &&
component.front() != '/') {
absl::StrAppend(&path, "/", component);
} else {
path += component;
}
}
std::string LexicalNormalizePath(std::string path) {
if (path.empty()) return path;
const char* src = path.c_str();
auto dst = path.begin();
const bool is_absolute_path = (*src == '/');
if (is_absolute_path) {
dst++;
src++;
while (*src == '/') ++src;
}
auto limit = dst;
while (*src) {
bool parsed = false;
if (src[0] == '.') {
if (src[1] == '/' || src[1] == '\\' || !src[1]) {
if (*++src) {
++src;
}
parsed = true;
} else if (src[1] == '.' &&
(src[2] == '/' || src[2] == '\\' || !src[2])) {
src += 2;
if (dst != limit) {
for (--dst; dst != limit && dst[-1] != '/'; --dst) {
}
} else if (!is_absolute_path) {
src -= 2;
*dst++ = *src++;
*dst++ = *src++;
if (*src) {
*dst++ = *src;
}
limit = dst;
}
if (*src) {
++src;
}
parsed = true;
}
}
if (!parsed) {
while (*src && *src != '/' && *src != '\\') {
*dst++ = *src++;
}
if (*src) {
*dst++ = '/';
src++;
}
}
while (*src == '/' || *src == '\\') {
++src;
}
}
path.resize(dst - path.begin());
return path;
}
}
} | #include "tensorstore/internal/path.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::EnsureDirectoryPath;
using ::tensorstore::internal::EnsureNonDirectoryPath;
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::LexicalNormalizePath;
using ::tensorstore::internal::PathDirnameBasename;
using ::testing::StrEq;
TEST(PathTest, JoinPath) {
EXPECT_EQ("/foo/bar", JoinPath("/foo", "bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo/", "bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo", "/bar"));
EXPECT_EQ("/foo
EXPECT_EQ("foo/bar", JoinPath("foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "/bar"));
EXPECT_EQ("/bar", JoinPath("", "/bar"));
EXPECT_EQ("bar", JoinPath("", "bar"));
EXPECT_EQ("/foo", JoinPath("/foo", ""));
EXPECT_EQ("/foo/bar/baz
JoinPath("/foo/bar/baz/", "/blah/blink/biz"));
EXPECT_EQ("/foo/bar/baz/blah", JoinPath("/foo", "bar", "baz", "blah"));
EXPECT_EQ("http:
}
TEST(PathTest, JoinPath_MixedArgs) {
constexpr const char kFoo[] = "/foo";
std::string_view foo_view("/foo");
std::string foo("/foo");
EXPECT_EQ("/foo/bar", JoinPath(foo_view, "bar"));
EXPECT_EQ("/foo/bar", JoinPath(foo, "bar"));
EXPECT_EQ("/foo/bar", JoinPath(kFoo, "/bar"));
}
TEST(PathTest, PathDirnameBasename) {
EXPECT_EQ("/a/b", PathDirnameBasename("/a/b/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("/a/b/bar").second);
EXPECT_EQ("/a/b/bar", PathDirnameBasename("/a/b/bar/").first);
EXPECT_EQ("", PathDirnameBasename("/a/b/bar/").second);
EXPECT_EQ("", PathDirnameBasename("").first);
EXPECT_EQ("", PathDirnameBasename("").second);
EXPECT_EQ("/", PathDirnameBasename("/").first);
EXPECT_EQ("", PathDirnameBasename("/").second);
EXPECT_EQ("a/b", PathDirnameBasename("a/b/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("a/b/bar").second);
EXPECT_EQ("", PathDirnameBasename("bar").first);
EXPECT_EQ("bar", PathDirnameBasename("bar").second);
EXPECT_EQ("/", PathDirnameBasename("/bar").first);
EXPECT_EQ("bar", PathDirnameBasename("/bar").second);
EXPECT_EQ("
EXPECT_EQ("bar", PathDirnameBasename("
EXPECT_EQ("/", PathDirnameBasename("
EXPECT_EQ("bar", PathDirnameBasename("
}
TEST(EnsureDirectoryPathTest, EmptyString) {
std::string path = "";
EnsureDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureDirectoryPathTest, SingleSlash) {
std::string path = "/";
EnsureDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureDirectoryPathTest, NonEmptyWithoutSlash) {
std::string path = "abc";
EnsureDirectoryPath(path);
EXPECT_EQ("abc/", path);
}
TEST(EnsureDirectoryPathTest, NonEmptyWithSlash) {
std::string path = "abc/";
EnsureDirectoryPath(path);
EXPECT_EQ("abc/", path);
}
TEST(EnsureNonDirectoryPathTest, EmptyString) {
std::string path = "";
EnsureNonDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureNonDirectoryPathTest, SingleSlash) {
std::string path = "/";
EnsureNonDirectoryPath(path);
EXPECT_EQ("", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithoutSlash) {
std::string path = "abc";
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithSlash) {
std::string path = "abc/";
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
TEST(EnsureNonDirectoryPathTest, NonEmptyWithSlashes) {
std::string path = "abc
EnsureNonDirectoryPath(path);
EXPECT_EQ("abc", path);
}
TEST(PathTest, LexicalNormalizePath) {
EXPECT_THAT(LexicalNormalizePath("/"), StrEq("/"));
EXPECT_THAT(LexicalNormalizePath("a/b/c"), StrEq("a/b/c"));
EXPECT_THAT(LexicalNormalizePath("/a/b/c"), StrEq("/a/b/c"));
EXPECT_THAT(LexicalNormalizePath("a/b/c/"), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("/a/b/c/"), StrEq("/a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a\\b\\c/"), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("C:\\a/b\\c\\"), StrEq("C:/a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a/b/./c"), StrEq("a/b/c"));
EXPECT_THAT(LexicalNormalizePath("./a/b/c/"), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a/b/c/./"), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a/b/c/."), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a/b/bb/../c/"), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("a/b/c/bb/.."), StrEq("a/b/c/"));
EXPECT_THAT(LexicalNormalizePath("../a/b/c"), StrEq("../a/b/c"));
EXPECT_THAT(LexicalNormalizePath("/../a/b/c"), StrEq("/a/b/c"));
EXPECT_THAT(LexicalNormalizePath("
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/path.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/path_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a07e1ff9-4f11-46b0-a33c-e1d5b746ce05 | cpp | google/tensorstore | nditerable_elementwise_output_transform | tensorstore/internal/nditerable_elementwise_output_transform.cc | tensorstore/internal/nditerable_elementwise_output_transform_test.cc | #include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <array>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
struct ElementwiseOutputTransformNDIterator
: public NDIterator::Base<ElementwiseOutputTransformNDIterator> {
explicit ElementwiseOutputTransformNDIterator(
const NDIterable* output, ElementwiseClosure<2, void*> closure,
NDIterable::IterationBufferKindLayoutView layout,
ArenaAllocator<> allocator)
: output_(tensorstore::span(&output, 1), layout, allocator),
context_(closure.context),
elementwise_function_((*closure.function)[layout.buffer_kind]) {}
ArenaAllocator<> get_allocator() const override {
return output_.get_allocator();
}
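// Commits one block of output: fetches the destination block from `output_`,
// runs the elementwise closure to transform the caller's buffer into it, and
// then writes the block back.  Each step may fail and set `*status`.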
bool UpdateBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer pointer,
absl::Status* status) override {
return output_.GetBlock(indices, block_shape, status) &&
elementwise_function_(context_, block_shape, pointer,
output_.block_pointers()[0], status) &&
output_.UpdateBlock(indices, block_shape, status);
}
NDIteratorsWithManagedBuffers<1> output_;
void* context_;
SpecializedElementwiseFunctionPointer<2, void*> elementwise_function_;
};
struct ElementwiseOutputTransformNDIterable
: public NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>> {
using Base = NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, 1>,
NDIterable::Base<ElementwiseOutputTransformNDIterable>>;
ElementwiseOutputTransformNDIterable(NDIterable::Ptr output,
DataType input_dtype,
ElementwiseClosure<2, void*> closure,
ArenaAllocator<> allocator)
: Base{{{std::move(output)}}},
input_dtype_(input_dtype),
closure_(closure),
allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
DataType dtype() const override { return input_dtype_; }
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterator>(
allocator_, this->iterables[0].get(), closure_, layout);
}
DataType input_dtype_;
ElementwiseClosure<2, void*> closure_;
ArenaAllocator<> allocator_;
};
}
NDIterable::Ptr GetElementwiseOutputTransformNDIterable(
NDIterable::Ptr output, DataType input_dtype,
ElementwiseClosure<2, void*> closure, Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseOutputTransformNDIterable>(
ArenaAllocator<>(arena), std::move(output), input_dtype, closure);
}
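// Hypothetical usage sketch (the variable names here are assumptions; the
// unit test's TestCopy helper is the authoritative example):
//   auto write_iterable = GetElementwiseOutputTransformNDIterable(
//       std::move(dest_iterable), tensorstore::dtype_v<int>, closure, &arena);
//   // Blocks written through `write_iterable` are converted by `closure`
//   // and then committed to the underlying destination iterable.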
}
} | #include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
template <typename Func, typename SourceArray, typename DestArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
SourceArray source_array, DestArray dest_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<2, void*> closure =
tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element, typename DestArray::Element),
void*>::Closure(&func);
auto iterable =
tensorstore::internal::GetElementwiseOutputTransformNDIterable(
tensorstore::internal::GetTransformedArrayNDIterable(dest_array,
&arena)
.value(),
tensorstore::dtype_v<typename SourceArray::Element>, closure, &arena);
return tensorstore::internal::NDIterableCopier(
*tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value(),
*iterable, dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseOutputTransformTest, Basic) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* status) { *dest = -*source; },
{}, source, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseOutputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, source, dest),
absl::UnknownError("zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_output_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_output_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
898920dc-dceb-4acd-a85e-233daf3f4737 | cpp | google/tensorstore | retry | tensorstore/internal/retry.cc | tensorstore/internal/retry_test.cc | #include "tensorstore/internal/retry.h"
#include <stdint.h>
#include <cassert>
#include "absl/random/random.h"
#include "absl/time/time.h"
namespace tensorstore {
namespace internal {
absl::Duration BackoffForAttempt(int attempt, absl::Duration initial_delay,
absl::Duration max_delay,
absl::Duration jitter) {
assert(initial_delay > absl::ZeroDuration());
assert(max_delay >= initial_delay);
assert(attempt >= 0);
int64_t multiple = int64_t{1} << (attempt > 62 ? 62 : attempt);
auto delay = initial_delay * multiple;
int64_t jitter_us = absl::ToInt64Microseconds(jitter);
if (jitter_us > 0) {
delay += absl::Microseconds(absl::Uniform(
absl::IntervalClosed, absl::InsecureBitGen{}, 0, jitter_us));
}
if (delay > max_delay) delay = max_delay;
return delay;
}
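// With zero jitter the delay doubles each attempt (initial_delay shifted
// left by the attempt count, saturated at 62 to avoid overflow) and is
// capped at max_delay.  Hypothetical retry-loop sketch (`TryOperation` is
// not a real API):
//   for (int attempt = 0; !TryOperation(); ++attempt) {
//     absl::SleepFor(BackoffForAttempt(attempt, absl::Milliseconds(10),
//                                      absl::Seconds(5), absl::Milliseconds(5)));
//   }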
}
} | #include "tensorstore/internal/retry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
namespace {
using ::tensorstore::internal::BackoffForAttempt;
TEST(RetryTest, BackoffForAttempt) {
EXPECT_EQ(absl::Microseconds(1),
BackoffForAttempt(0, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(absl::Microseconds(2),
BackoffForAttempt(1, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(absl::Microseconds(4),
BackoffForAttempt(2, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_EQ(
absl::Microseconds(100),
BackoffForAttempt(66, absl::Microseconds(1), absl::Microseconds(100),
absl::ZeroDuration()));
EXPECT_THAT(absl::ToInt64Microseconds(BackoffForAttempt(
2, absl::Microseconds(1), absl::Microseconds(200),
absl::Microseconds(100))),
::testing::AllOf(::testing::Ge(2), testing::Le(104)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/retry.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/retry_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
70f4d2fa-0102-416a-8f90-3068250b5d23 | cpp | google/tensorstore | nditerable_transformed_array | tensorstore/internal/nditerable_transformed_array.cc | tensorstore/internal/nditerable_transformed_array_test.cc | #include "tensorstore/internal/nditerable_transformed_array.h"
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace input_dim_iter_flags =
internal_index_space::input_dimension_iteration_flags;
namespace {
class IterableImpl : public NDIterable::Base<IterableImpl> {
public:
IterableImpl(IndexTransform<> transform, allocator_type allocator)
: transform_(std::move(transform)),
input_dimension_flags_(transform_.input_rank(),
input_dim_iter_flags::can_skip, allocator) {}
allocator_type get_allocator() const override {
return input_dimension_flags_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return (flags_i & input_dim_iter_flags::array_indexed) ? -2 : 2;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
const int order = GetDimensionOrderFromByteStrides(
state_.index_array_byte_strides[i][dim_i],
state_.index_array_byte_strides[i][dim_j]);
if (order != 0) return order;
}
}
return GetDimensionOrderFromByteStrides(state_.input_byte_strides[dim_i],
state_.input_byte_strides[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
const DimensionIndex input_rank = transform_.input_rank();
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
UpdateDirectionPrefsFromByteStrides(
tensorstore::span(state_.index_array_byte_strides[i], input_rank),
prefs);
}
UpdateDirectionPrefsFromByteStrides(
tensorstore::span(&state_.input_byte_strides[0], input_rank), prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return false;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
if (!CanCombineStridedArrayDimensions(
state_.index_array_byte_strides[i][dim_i], dir_i,
state_.index_array_byte_strides[i][dim_j], dir_j, size_j)) {
return false;
}
}
}
return CanCombineStridedArrayDimensions(
state_.input_byte_strides[dim_i], dir_i,
state_.input_byte_strides[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex penultimate_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 2];
const DimensionIndex last_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 1];
if ((last_dim == -1 || (input_dimension_flags_[last_dim] &
input_dim_iter_flags::array_indexed) == 0) &&
(penultimate_dim == -1 || (input_dimension_flags_[penultimate_dim] &
input_dim_iter_flags::array_indexed) == 0)) {
return {(last_dim == -1 || state_.input_byte_strides[last_dim] *
layout.directions[last_dim] ==
this->dtype_->size)
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
} else {
return {IterationBufferKind::kIndexed, false};
}
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<IteratorImpl>(
get_allocator(), this, layout);
}
class IteratorImpl : public NDIterator::Base<IteratorImpl> {
public:
IteratorImpl(const IterableImpl* iterable,
NDIterable::IterationBufferKindLayoutView layout,
allocator_type allocator)
: num_index_arrays_(
iterable->state_.num_array_indexed_output_dimensions),
num_index_array_iteration_dims_(0),
iterable_(iterable),
buffer_(
num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1) +
((layout.buffer_kind == IterationBufferKind::kIndexed)
? layout.block_shape[0] * layout.block_shape[1]
: 0),
allocator) {
static_assert(sizeof(Index) >= sizeof(void*));
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
ByteStridedPointer<const Index> index_array_pointer =
iterable->state_.index_array_pointers[j].get();
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
index_array_pointer +=
wrap_on_overflow::Multiply(index_array_byte_stride, size_minus_1);
}
buffer_[j] = reinterpret_cast<Index>(index_array_pointer.get());
}
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
base_offset = wrap_on_overflow::Add(
base_offset,
wrap_on_overflow::Multiply(input_byte_stride, size_minus_1));
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
for (DimensionIndex j = 0; j < num_index_arrays_ + 1; ++j) {
buffer_[num_index_arrays_ + layout.iteration_rank() * j + i] = 0;
}
} else {
const Index dir = layout.directions[dim];
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
buffer_[num_index_arrays_ + i] =
wrap_on_overflow::Multiply(input_byte_stride, dir);
if (iterable->input_dimension_flags_[dim] &
input_dim_iter_flags::array_indexed) {
num_index_array_iteration_dims_ = i + 1;
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
buffer_[num_index_arrays_ + layout.iteration_rank() * (j + 1) +
i] =
wrap_on_overflow::Multiply(index_array_byte_stride, dir);
}
}
}
}
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
Index* offsets_array =
buffer_.data() + num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1);
pointer_ =
IterationBufferPointer{iterable->state_.base_pointer + base_offset,
layout.block_shape[1], offsets_array};
if (num_index_array_iteration_dims_ + 1 < layout.iteration_rank()) {
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1], offsets_array);
}
} else {
assert(num_index_array_iteration_dims_ + 1 < layout.iteration_rank());
pointer_ = IterationBufferPointer{
iterable->state_.base_pointer + base_offset,
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1]};
}
}
allocator_type get_allocator() const override {
return buffer_.get_allocator();
}
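// Fast path below: when no index array depends on the final two iteration
// dimensions, each index array contributes a single scalar offset per block.
// Otherwise the per-element byte-offset array is rebuilt for this block from
// the strided offsets plus each index array's per-element contributions.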
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
IterationBufferPointer block_pointer = pointer_;
block_pointer.pointer += IndexInnerProduct(
indices.size(), indices.data(), buffer_.data() + num_index_arrays_);
if (num_index_array_iteration_dims_ + 1 < indices.size()) {
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index = ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j]))[IndexInnerProduct(
num_index_array_iteration_dims_, indices.data(),
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1))];
block_pointer.pointer += wrap_on_overflow::Multiply(
iterable_->state_.index_array_output_byte_strides[j], index);
}
} else {
block_pointer.byte_offsets_outer_stride = block_shape[1];
Index* offsets_array = const_cast<Index*>(block_pointer.byte_offsets);
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + indices.size() - 2],
buffer_[num_index_arrays_ + indices.size() - 1], block_shape[0],
block_shape[1], offsets_array);
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index* index_array_byte_strides =
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1);
ByteStridedPointer<const Index> index_array_pointer =
ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j])) +
IndexInnerProduct(indices.size() - 2, indices.data(),
index_array_byte_strides);
const Index output_byte_stride =
iterable_->state_.index_array_output_byte_strides[j];
const Index penultimate_index_array_byte_stride =
index_array_byte_strides[indices.size() - 2];
const Index last_index_array_byte_stride =
index_array_byte_strides[indices.size() - 1];
if (last_index_array_byte_stride == 0 &&
penultimate_index_array_byte_stride == 0) {
block_pointer.pointer += wrap_on_overflow::Multiply(
output_byte_stride, *index_array_pointer);
} else {
Index block_start0 = indices[indices.size() - 2];
Index block_start1 = indices[indices.size() - 1];
for (Index outer = 0; outer < block_shape[0]; ++outer) {
for (Index inner = 0; inner < block_shape[1]; ++inner) {
Index cur_contribution = wrap_on_overflow::Multiply(
output_byte_stride,
index_array_pointer[wrap_on_overflow::Add(
wrap_on_overflow::Multiply(
outer + block_start0,
penultimate_index_array_byte_stride),
wrap_on_overflow::Multiply(
inner + block_start1,
last_index_array_byte_stride))]);
auto& offset = offsets_array[outer * block_shape[1] + inner];
offset = wrap_on_overflow::Add(offset, cur_contribution);
}
}
}
}
}
*pointer = block_pointer;
return true;
}
private:
DimensionIndex num_index_arrays_;
DimensionIndex num_index_array_iteration_dims_;
const IterableImpl* iterable_;
IterationBufferPointer pointer_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
std::shared_ptr<const void> data_owner_;
IndexTransform<> transform_;
internal_index_space::SingleArrayIterationState state_;
DataType dtype_;
std::vector<input_dim_iter_flags::Bitmask,
ArenaAllocator<input_dim_iter_flags::Bitmask>>
input_dimension_flags_;
};
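// When the transform required no index-array output maps, the precomputed
// iteration state reduces to a base pointer plus per-dimension byte strides,
// so the cheaper plain-array iterable can be substituted for IterableImpl.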
Result<NDIterable::Ptr> MaybeConvertToArrayNDIterable(
std::unique_ptr<IterableImpl, VirtualDestroyDeleter> impl, Arena* arena) {
if (impl->state_.num_array_indexed_output_dimensions == 0) {
return GetArrayNDIterable(
SharedOffsetArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(impl->data_owner_),
impl->state_.base_pointer),
impl->dtype_),
StridedLayoutView<>(impl->transform_.input_rank(),
impl->transform_.input_shape().data(),
&impl->state_.input_byte_strides[0])),
arena);
}
return impl;
}
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
SharedOffsetArrayView<const void> array, IndexTransformView<> transform,
Arena* arena) {
if (!transform.valid()) {
return GetArrayNDIterable(array, arena);
}
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), transform);
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
array, internal_index_space::TransformAccess::rep(transform),
transform.input_origin().data(), transform.input_shape().data(),
&impl->state_, impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
TransformedArray<Shared<const void>> array, Arena* arena) {
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), std::move(array.transform()));
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
ElementPointer<const void>(array.element_pointer()),
internal_index_space::TransformAccess::rep(impl->transform_),
impl->transform_.input_origin().data(),
impl->transform_.input_shape().data(), &impl->state_,
impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
}
} | #include "tensorstore/internal/nditerable_transformed_array.h"
#include <stddef.h>
#include <array>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::skip_repeated_elements;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferShape;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::FieldsAre;
using ::testing::Pair;
using IterationTrace = std::vector<void*>;
template <typename... Element>
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
GetIterationTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
ptrdiff_t i = 0;
const auto unused = {(
[&] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
&result.first[i]);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
template <size_t N>
using BlockTrace =
std::vector<std::tuple<std::vector<Index>, IterationBufferShape,
std::array<IterationTrace, N>>>;
template <typename... Element>
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> GetBlockTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
auto& [position, shape, traces] = result.first.emplace_back();
position.assign(multi_iterator->position().begin(),
multi_iterator->position().end());
shape = block_shape;
ptrdiff_t i = 0;
const auto unused = {(
[&, traces_ptr = &traces[i]] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
traces_ptr);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
class MaybeDirectTest : public ::testing::TestWithParam<bool> {
protected:
Arena arena;
Result<NDIterable::Ptr> GetMaybeDirectTransformedArrayNDIterable(
tensorstore::SharedOffsetArrayView<const void> array,
tensorstore::IndexTransformView<> transform) {
if (GetParam()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto transformed_array,
MakeTransformedArray(array, transform));
return GetTransformedArrayNDIterable(std::move(transformed_array),
&arena);
} else {
return GetTransformedArrayNDIterable(std::move(array), transform, &arena);
}
}
};
INSTANTIATE_TEST_SUITE_P(Indirect, MaybeDirectTest, ::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(Direct, MaybeDirectTest, ::testing::Values(false));
TEST(NDIterableTransformedArrayTest, Strided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, SingleIndexedDimension) {
Arena arena;
auto a = AllocateArray<int>({4});
auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({1, 2, 3, 0})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable->dtype());
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(1), &a(2), &a(3), &a(0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
OneStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionContiguousBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionStridedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 4});
auto ta = (a | tensorstore::Dims(2).Stride(2) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 2), &a(1, 0, 0), &a(1, 0, 2),
&a(0, 2, 0), &a(0, 2, 2), &a(1, 2, 0), &a(1, 2, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto tb =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable1 = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable2 = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable1.get(), iterable2.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
auto element_matcher = ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
});
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(element_matcher, element_matcher), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedAndReversedStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})) |
tensorstore::Dims(0).SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombine) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {2, 0}})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombinePartiallyReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, {1, -1}))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, 1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombineBothReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, -1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedVsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 2});
auto b = AllocateArray<int>({2, 3});
auto tb =
(b | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(a, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
tb.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 1), &a(1, 1)),
ElementsAre(&b(0, 0), &b(1, 0), &b(0, 2), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedWith2StridedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 2, 3});
auto ta =
(a | tensorstore::Dims(1).MoveToFront() |
tensorstore::Dims(2).OuterIndexArraySlice(MakeArray<Index>({0, 2, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(
&a(0, 0, 0), &a(0, 1, 0), &a(1, 0, 0), &a(1, 1, 0),
&a(0, 0, 2), &a(0, 1, 2), &a(1, 0, 2), &a(1, 1, 2),
&a(0, 0, 1), &a(0, 1, 1), &a(1, 0, 1), &a(1, 1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta =
(a |
tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0),
&a(1, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, FourIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, LastTwoDimsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoTransformedArrays) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto b = AllocateArray<int>({2, 3});
auto ta =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})))
.value();
auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 1, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT((GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 1), &a(0, 2),
&a(1, 0), &a(1, 1), &a(1, 2)),
ElementsAre(&b(0, 0), &b(0, 1), &b(0, 2),
&b(1, 0), &b(1, 1), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, ZeroRankIndexArray) {
Arena arena;
SharedArray<const Index> index_array{std::make_shared<Index>(3),
StridedLayout<>({5}, {0})};
int data[100];
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({5})
.output_index_array(0, sizeof(int) * 2, sizeof(int) * 4, index_array)
.Finalize());
auto iterable_a = GetTransformedArrayNDIterable(
{tensorstore::UnownedToShared(
tensorstore::ElementPointer<int>(&data[0])),
transform},
&arena)
.value();
MultiNDIterator<1, true> multi_iterator(
transform.input_shape(), skip_repeated_elements, {{iterable_a.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, -1));
EXPECT_THAT(
(GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&data[4 * 3 + 2])), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsConstant) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_constant(0, 8)
.Finalize()
.value();
EXPECT_THAT(
GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Checking bounds of constant output index map for "
"dimension 0: Index 8 is outside valid range \\[0, 5\\)"));
}
TEST(NDIterableTransformedArrayTest, NullTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a = GetTransformedArrayNDIterable(a, {}, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IdentityTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a =
GetTransformedArrayNDIterable(
a,
tensorstore::IdentityTransform(tensorstore::span<const Index>({5})),
&arena)
.value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsSingleInputDimension) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_single_input_dimension(0, 2, 1, 0)
.Finalize()
.value();
EXPECT_THAT(GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Output dimension 0 range of \\[2, 7\\) is not "
"contained within array domain of \\[0, 5\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsIndexArray) {
auto a = AllocateArray<int>({5});
auto transform =
IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, MakeArray<Index>({0, 0, 0, 0, 42}))
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsSingletonIndexArray) {
SharedArray<const Index> index_array{std::make_shared<Index>(42),
StridedLayout<>({5}, {0})};
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, index_array)
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST(NDIterableTransformedArrayTest, BlockTraceThreeStridedDimensions) {
Arena arena;
auto a = AllocateArray<int>({2, 5, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1, 2));
EXPECT_THAT(
GetBlockTrace<int>(&multi_iterator),
Pair(ElementsAre(FieldsAre(ElementsAre(0, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(0, 0, 0),
&a(0, 0, 1),
&a(0, 0, 2),
&a(0, 2, 0),
&a(0, 2, 1),
&a(0, 2, 2),
}))),
FieldsAre(ElementsAre(1, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(1, 0, 0),
&a(1, 0, 1),
&a(1, 0, 2),
&a(1, 2, 0),
&a(1, 2, 1),
&a(1, 2, 2),
})))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
InnermostBlockSizeLessThanInnermostIterationSize) {
Arena arena;
auto a = AllocateArray<int>({2, 32768}, tensorstore::c_order,
tensorstore::value_init);
auto ta = (a | tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({0, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
struct IncrementValue {
void operator()(int* x) const { *x += 1; }
};
constexpr tensorstore::internal::ElementwiseFunction<1> increment_value_func =
tensorstore::internal::SimpleElementwiseFunction<IncrementValue(int)>();
TENSORSTORE_ASSERT_OK(
(tensorstore::internal::IterateOverNDIterables<1, true>(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena,
{&increment_value_func, nullptr})));
EXPECT_THAT(a, tensorstore::MatchesArray(
tensorstore::BroadcastArray(
tensorstore::MakeScalarArray<int>(1), a.shape())
.value()));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
15e7ba7c-a336-42e7-96e0-75df36916842 | cpp | google/tensorstore | dimension_labels | tensorstore/internal/dimension_labels.cc | tensorstore/internal/dimension_labels_test.cc | #include "tensorstore/internal/dimension_labels.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <string_view>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
namespace {
absl::Status ValidateDimensionLabelsAreUniqueImpl(
tensorstore::span<std::string_view> sorted_labels) {
std::sort(sorted_labels.begin(), sorted_labels.end());
size_t i;
for (i = 1; i < sorted_labels.size() && sorted_labels[i].empty(); ++i)
continue;
std::string error;
for (; i < sorted_labels.size(); ++i) {
std::string_view label = sorted_labels[i];
if (label == sorted_labels[i - 1]) {
tensorstore::StrAppend(&error, error.empty() ? "" : ", ",
QuoteString(label));
}
}
if (!error.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Dimension label(s) ", error, " not unique"));
}
return absl::OkStatus();
}
}
absl::Status ValidateDimensionLabelsAreUnique(
tensorstore::span<const std::string> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
absl::Status ValidateDimensionLabelsAreUnique(
tensorstore::span<const std::string_view> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
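// Example: {"x", "y", "x"} fails with
//   InvalidArgumentError: Dimension label(s) "x" not unique
// whereas {"", "", "x"} succeeds, because empty (unlabeled) dimensions are
// exempt from the uniqueness check.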
}
} | #include "tensorstore/internal/dimension_labels.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ValidateDimensionLabelsAreUnique;
TEST(ValidateDimensionLabelsAreUniqueTest, Basic) {
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c"}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{"", "", ""}));
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "", "d", ""}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{}));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "a"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"a\" not unique"));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "b"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"b\" not unique"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/dimension_labels.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/dimension_labels_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f09af30f-f8df-4c58-a306-5b6f6fb53caf | cpp | google/tensorstore | lock_collection | tensorstore/internal/lock_collection.cc | tensorstore/internal/lock_collection_test.cc | #include "tensorstore/internal/lock_collection.h"
namespace tensorstore {
namespace internal {
bool LockCollection::MutexSharedLockFunction(void* mutex, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto& m = *static_cast<absl::Mutex*>(mutex);
if (lock) {
m.ReaderLock();
} else {
m.ReaderUnlock();
}
return true;
}
bool LockCollection::MutexExclusiveLockFunction(void* mutex, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto& m = *static_cast<absl::Mutex*>(mutex);
if (lock) {
m.WriterLock();
} else {
m.WriterUnlock();
}
return true;
}
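// Both Mutex*LockFunction helpers unconditionally succeed; the bool return
// only satisfies the generic lock-function signature, whose implementations
// are allowed to fail (exercised by the Fail unit test below).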
bool LockCollection::try_lock() {
if (locks_.size() > 1) {
std::sort(locks_.begin(), locks_.end(), [](const Entry& a, const Entry& b) {
return a.tagged_pointer < b.tagged_pointer;
});
locks_.erase(std::unique(locks_.begin(), locks_.end(),
[](const Entry& a, const Entry& b) {
return a.data() == b.data();
}),
locks_.end());
}
size_t i = 0, size = locks_.size();
auto* locks = locks_.data();
for (; i < size; ++i) {
auto& entry = locks[i];
if (!entry.lock_function(entry.data(), true)) {
while (i > 0) {
--i;
auto& prev_entry = locks[i];
prev_entry.lock_function(prev_entry.data(), false);
}
return false;
}
}
return true;
}
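// Sorting by tagged pointer value gives every LockCollection the same global
// acquisition order, so collections locking overlapping mutex sets cannot
// deadlock against one another.  Deduplication keeps one entry per lockable;
// for a given pointer an exclusive registration takes precedence over a
// shared one (see the SingleExclusiveDuplicate unit test).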
void LockCollection::unlock() {
for (const auto& entry : locks_) {
entry.lock_function(entry.data(), false);
}
}
void LockCollection::clear() { locks_.clear(); }
}
} | #include "tensorstore/internal/lock_collection.h"
#include <array>
#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/testing/concurrent.h"
namespace {
using ::tensorstore::internal::LockCollection;
using ::tensorstore::internal_testing::TestConcurrent;
TEST(LockCollectionTest, Empty) {
LockCollection c;
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
}
}
TEST(LockCollectionTest, SingleShared) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleSharedDuplicate) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
c.RegisterShared(m);
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertReaderHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleExclusive) {
absl::Mutex m;
LockCollection c;
c.RegisterExclusive(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, SingleExclusiveDuplicate) {
absl::Mutex m;
LockCollection c;
c.RegisterShared(m);
c.RegisterExclusive(m);
c.RegisterShared(m);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m.AssertHeld();
}
m.AssertNotHeld();
}
TEST(LockCollectionTest, Multiple) {
absl::Mutex m[3];
LockCollection c;
c.RegisterShared(m[0]);
c.RegisterExclusive(m[0]);
c.RegisterShared(m[1]);
c.RegisterShared(m[0]);
c.RegisterShared(m[2]);
c.RegisterShared(m[1]);
c.RegisterShared(m[1]);
c.RegisterShared(m[2]);
{
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
ASSERT_TRUE(guard);
m[0].AssertHeld();
m[1].AssertReaderHeld();
m[2].AssertReaderHeld();
}
m[0].AssertNotHeld();
m[1].AssertNotHeld();
m[2].AssertNotHeld();
}
#if !defined(_WIN32)
TEST(LockCollectionTest, MultipleConcurrentExclusive) {
constexpr static size_t kNumMutexes = 3;
absl::Mutex m[kNumMutexes];
constexpr static size_t kNumCollections = 3;
LockCollection c[kNumCollections];
std::array<int, kNumMutexes> mutex_indices;
absl::c_iota(mutex_indices, 0);
const auto RegisterFromPermutation = [&](LockCollection& lock_collection) {
for (auto i : mutex_indices) lock_collection.RegisterExclusive(m[i]);
};
RegisterFromPermutation(c[0]);
absl::c_next_permutation(mutex_indices);
RegisterFromPermutation(c[1]);
while (absl::c_next_permutation(mutex_indices)) {
c[2] = LockCollection();
RegisterFromPermutation(c[2]);
TestConcurrent<kNumCollections>(
100,
[] {},
[] {},
[&](size_t i) {
std::unique_lock<LockCollection> guard(c[i], std::try_to_lock);
ASSERT_TRUE(guard);
});
}
}
TEST(LockCollectionTest, MultipleConcurrentExclusiveShared) {
constexpr static size_t kNumMutexes = 3;
absl::Mutex m[kNumMutexes];
constexpr static size_t kNumCollections = 3;
constexpr static size_t kNumSharedCombinations = size_t(1) << kNumMutexes;
LockCollection c[kNumCollections];
std::array<int, kNumMutexes> mutex_indices;
absl::c_iota(mutex_indices, 0);
const auto RegisterFromPermutation = [&](LockCollection& lock_collection,
size_t shared_bit_vector) {
for (auto i : mutex_indices) {
if ((shared_bit_vector >> i) & 1) {
lock_collection.RegisterShared(m[i]);
} else {
lock_collection.RegisterExclusive(m[i]);
}
}
};
RegisterFromPermutation(c[0], 0);
absl::c_next_permutation(mutex_indices);
RegisterFromPermutation(c[1], ~size_t(0));
while (absl::c_next_permutation(mutex_indices)) {
for (size_t shared_bit_vector = 0;
shared_bit_vector < kNumSharedCombinations; ++shared_bit_vector) {
c[2] = LockCollection();
RegisterFromPermutation(c[2], shared_bit_vector);
TestConcurrent<kNumCollections>(
20,
[] {},
[] {},
[&](size_t i) {
std::unique_lock<LockCollection> guard(c[i], std::try_to_lock);
EXPECT_TRUE(guard);
});
}
}
}
#endif
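// Fake lockable that records every lock/unlock call and can be configured to
// fail, to verify rollback order when acquisition fails partway through.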
struct LoggingLockable;
using LockLog = std::vector<std::pair<LoggingLockable*, bool>>;
struct LoggingLockable {
LockLog& log;
bool fail;
};
TEST(LockCollectionTest, Fail) {
LockLog log;
LoggingLockable lockables[4] = {
LoggingLockable{log, false},
LoggingLockable{log, false},
LoggingLockable{log, true},
LoggingLockable{log, true},
};
constexpr auto lock_function = [](void* data, bool lock) -> bool {
auto* lockable = static_cast<LoggingLockable*>(data);
lockable->log.emplace_back(lockable, lock);
if (lock && lockable->fail) return false;
return true;
};
LockCollection c;
for (auto& lockable : lockables) {
c.Register(&lockable, lock_function, false);
}
std::unique_lock<LockCollection> guard(c, std::try_to_lock);
EXPECT_FALSE(guard);
EXPECT_THAT(log,
::testing::ElementsAre(::testing::Pair(&lockables[0], true),
::testing::Pair(&lockables[1], true),
::testing::Pair(&lockables[2], true),
::testing::Pair(&lockables[1], false),
::testing::Pair(&lockables[0], false)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/lock_collection.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/lock_collection_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
68355e70-e453-4069-8d76-79f9dacc44d2 | cpp | google/tensorstore | nditerable_data_type_conversion | tensorstore/internal/nditerable_data_type_conversion.cc | tensorstore/internal/nditerable_data_type_conversion_test.cc | #include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <cassert>
#include <memory>
#include <utility>
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
namespace tensorstore {
namespace internal {
namespace {
template <typename Derived, typename BasePointer = NDIterable::Ptr>
class NDIterableAdapter : public NDIterable::Base<Derived> {
public:
NDIterableAdapter(BasePointer base) : base_(std::move(base)) {}
const BasePointer& base() const { return base_; }
BasePointer& base() { return base_; }
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return base_->GetDimensionOrder(dim_i, dim_j);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
base_->UpdateDirectionPrefs(prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return base_->CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j);
}
NDIterable::IterationBufferConstraint GetIterationBufferConstraint(
NDIterable::IterationLayoutView layout) const override {
return base_->GetIterationBufferConstraint(layout);
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return base_->GetWorkingMemoryBytesPerElement(layout, buffer_kind);
}
DataType dtype() const override { return base_->dtype(); }
ArenaAllocator<> get_allocator() const override {
return base_->get_allocator();
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return base_->GetIterator(layout);
}
private:
BasePointer base_;
};
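// Presents a base iterable as a different data type without converting any
// elements, for conversions that amount to a reinterpret_cast.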
class ReinterpretCastNDIterable
: public NDIterableAdapter<ReinterpretCastNDIterable> {
public:
ReinterpretCastNDIterable(NDIterable::Ptr base, DataType new_dtype,
ArenaAllocator<> allocator)
: NDIterableAdapter<ReinterpretCastNDIterable>(std::move(base)),
dtype_(new_dtype) {}
DataType dtype() const override { return dtype_; }
private:
DataType dtype_;
};
}
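// Identity conversions return the iterable unchanged; reinterpret casts only
// re-label the data type; all other supported conversions go through an
// elementwise input transform.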
NDIterable::Ptr GetConvertedInputNDIterable(
NDIterable::Ptr iterable, DataType target_type,
const DataTypeConversionLookupResult& conversion) {
assert(DataTypeConversionFlags::kSupported ==
(conversion.flags & DataTypeConversionFlags::kSupported));
if (DataTypeConversionFlags::kIdentity ==
(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (DataTypeConversionFlags::kCanReinterpretCast ==
(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), target_type);
}
return GetElementwiseInputTransformNDIterable({{std::move(iterable)}},
target_type, conversion.closure,
allocator.arena());
}
NDIterable::Ptr GetConvertedOutputNDIterable(
NDIterable::Ptr iterable, DataType source_type,
const DataTypeConversionLookupResult& conversion) {
assert(!!(conversion.flags & DataTypeConversionFlags::kSupported));
if (!!(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (!!(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), source_type);
}
return GetElementwiseOutputTransformNDIterable(
std::move(iterable), source_type, conversion.closure, allocator.arena());
}
}
} | #include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <stdint.h>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::GetDataTypeConverter;
using ::testing::Pair;
using ::tensorstore::dtypes::json_t;
using ::tensorstore::dtypes::string_t;
}
class NDIterableDataTypeConversionTest : public ::testing::TestWithParam<bool> {
protected:
tensorstore::internal::Arena arena;
std::pair<absl::Status, SharedArray<const void>> Convert(
TransformedArray<Shared<const void>> source, DataType target_dtype) {
tensorstore::internal::Arena arena;
auto target =
tensorstore::AllocateArray(source.shape(), tensorstore::c_order,
tensorstore::value_init, target_dtype);
auto source_iterable =
tensorstore::internal::GetTransformedArrayNDIterable(source, &arena)
.value();
auto target_iterable =
tensorstore::internal::GetArrayNDIterable(target, &arena);
if (GetParam()) {
source_iterable = GetConvertedInputNDIterable(
std::move(source_iterable), target_dtype,
GetDataTypeConverter(source.dtype(), target_dtype));
} else {
target_iterable = GetConvertedOutputNDIterable(
std::move(target_iterable), source.dtype(),
GetDataTypeConverter(source.dtype(), target_dtype));
}
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *target_iterable, target.shape(),
tensorstore::c_order, &arena);
absl::Status status = copier.Copy();
return std::make_pair(status, target);
}
};
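// The bool parameter selects whether the conversion is attached to the input
// iterable (true) or to the output iterable (false).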
INSTANTIATE_TEST_SUITE_P(GetConvertedInputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(GetConvertedOutputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(false));
TEST_P(NDIterableDataTypeConversionTest, Int32ToInt32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<int32_t>),
Pair(absl::OkStatus(), MakeArray<int32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToUint32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<uint32_t>),
Pair(absl::OkStatus(), MakeArray<uint32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToString) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<string_t>),
Pair(absl::OkStatus(), MakeArray<string_t>({"1", "2", "3"})));
}
TEST_P(NDIterableDataTypeConversionTest, JsonToString) {
EXPECT_THAT(
Convert(MakeArray<json_t>({"hello", "world", 3}), dtype_v<string_t>),
Pair(MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: 3"),
MakeArray<string_t>({"hello", "world", ""})));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_data_type_conversion.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_data_type_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f92f7a68-f778-4514-9db2-42387ef2abfa | cpp | google/tensorstore | json_gtest | tensorstore/internal/json_gtest.cc | tensorstore/internal/json_gtest_test.cc | #include "tensorstore/internal/json_gtest.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
class JsonMatcherImpl : public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonMatcherImpl(::nlohmann::json value) : value_(std::move(value)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
if (!internal_json::JsonSame(value_, value_untyped)) {
if (listener->IsInterested()) {
*listener << "where the difference is:\n"
<< ::nlohmann::json::diff(value_, value_untyped)
.dump(
2, ' ',
true,
::nlohmann::json::error_handler_t::ignore);
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << "matches json " << value_;
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not match json " << value_;
}
private:
::nlohmann::json value_;
};
}
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j) {
return ::testing::MakeMatcher(new JsonMatcherImpl(std::move(j)));
}
namespace {
class JsonPointerMatcherImpl
: public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonPointerMatcherImpl(std::string sub_value_pointer,
::testing::Matcher<::nlohmann::json> sub_value_matcher)
: sub_value_pointer_(std::move(sub_value_pointer)),
sub_value_matcher_(std::move(sub_value_matcher)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
auto sub_value =
json_pointer::Dereference(value_untyped, sub_value_pointer_);
if (!sub_value.ok()) {
if (listener->IsInterested()) {
*listener << "where the pointer could not be resolved: "
<< sub_value.status();
}
return false;
}
if (listener->IsInterested()) {
::testing::StringMatchResultListener s;
if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
*listener << "whose sub value doesn't match";
auto str = s.str();
if (!str.empty()) {
*listener << ", " << str;
}
return false;
}
return true;
}
return sub_value_matcher_.Matches(**sub_value);
}
void DescribeTo(std::ostream* os) const override {
*os << "has sub value " << tensorstore::QuoteString(sub_value_pointer_)
<< " that ";
sub_value_matcher_.DescribeTo(os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not have sub value "
<< tensorstore::QuoteString(sub_value_pointer_) << " that ";
sub_value_matcher_.DescribeTo(os);
}
private:
std::string sub_value_pointer_;
::testing::Matcher<nlohmann::json> sub_value_matcher_;
};
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer,
::testing::Matcher<::nlohmann::json> value_matcher) {
return ::testing::MakeMatcher(new JsonPointerMatcherImpl(
std::move(json_pointer), std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer, ::nlohmann::json value_matcher) {
return JsonSubValueMatches(std::move(json_pointer),
MatchesJson(std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
std::vector<std::pair<std::string, ::nlohmann::json>> matchers) {
std::vector<::testing::Matcher<::nlohmann::json>> all;
all.reserve(matchers.size());
for (const auto& p : matchers) {
all.push_back(JsonSubValueMatches(p.first, p.second));
}
return ::testing::AllOfArray(all);
}
} | #include "tensorstore/internal/json_gtest.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::JsonSubValueMatches;
using ::tensorstore::JsonSubValuesMatch;
using ::tensorstore::MatchesJson;
template <typename MatcherType>
std::string Describe(const MatcherType& m) {
std::ostringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(JsonSubValueMatchesTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValueMatches("/a", 123));
EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
EXPECT_THAT(obj,
JsonSubValueMatches("/b/c", ::testing::Not(MatchesJson("xy"))));
EXPECT_THAT(Describe(JsonSubValueMatches("/a", 123)),
"has sub value \"/a\" that matches json 123");
EXPECT_THAT(Explain(JsonSubValueMatches("/a", 124), obj),
::testing::StartsWith(
"whose sub value doesn't match, where the difference is:"));
}
TEST(JsonSubValuesMatchTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_gtest.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_gtest_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
270ece82-627b-47fa-94a3-20bcccd94c35 | cpp | google/tensorstore | nditerable_elementwise_input_transform | tensorstore/internal/nditerable_elementwise_input_transform.cc | tensorstore/internal/nditerable_elementwise_input_transform_test.cc | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <stddef.h>
#include <array>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
template <size_t Arity>
class ElementwiseInputTransformNDIterator
: public NDIterator::Base<ElementwiseInputTransformNDIterator<Arity>> {
public:
explicit ElementwiseInputTransformNDIterator(
tensorstore::span<const NDIterable::Ptr, Arity> inputs,
ElementwiseClosure<Arity + 1, void*> closure,
NDIterable::IterationBufferKindLayoutView layout,
ArenaAllocator<> allocator)
: inputs_(inputs, layout, allocator),
context_(closure.context),
elementwise_function_((*closure.function)[layout.buffer_kind]) {}
ArenaAllocator<> get_allocator() const override {
return inputs_.get_allocator();
}
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
return inputs_.GetBlock(indices, block_shape, status) &&
InvokeElementwiseFunction<Arity>(
elementwise_function_, context_, block_shape,
inputs_.block_pointers(), *pointer, static_cast<void*>(status));
}
private:
NDIteratorsWithManagedBuffers<Arity> inputs_;
void* context_;
SpecializedElementwiseFunctionPointer<Arity + 1, void*> elementwise_function_;
};
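// Iterable that applies an elementwise closure to `Arity` input iterables and
// exposes the result as a single iterable of `output_dtype`.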
template <size_t Arity>
class ElementwiseInputTransformNDIterable
: public NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, Arity>,
NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>> {
using Base = NDIterablesWithManagedBuffers<
std::array<NDIterable::Ptr, Arity>,
NDIterable::Base<ElementwiseInputTransformNDIterable<Arity>>>;
public:
ElementwiseInputTransformNDIterable(
std::array<NDIterable::Ptr, Arity> input_iterables, DataType output_dtype,
ElementwiseClosure<Arity + 1, void*> closure, ArenaAllocator<> allocator)
: Base{std::move(input_iterables)},
output_dtype_(output_dtype),
closure_(closure),
allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
DataType dtype() const override { return output_dtype_; }
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseInputTransformNDIterator<Arity>>(allocator_, this->iterables,
closure_, layout);
}
private:
std::array<NDIterable::Ptr, Arity> inputs_;
DataType output_dtype_;
ElementwiseClosure<Arity + 1, void*> closure_;
ArenaAllocator<> allocator_;
};
}
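// Note: `Arity` counts the output as well, so `Arity - 1` input iterables are
// consumed. A hypothetical unary-transform call might look like (sketch, names
// assumed):
//   auto out = GetElementwiseInputTransformNDIterable<2>(
//       {{std::move(input)}}, dtype_v<float>, closure, &arena);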
template <size_t Arity>
NDIterable::Ptr GetElementwiseInputTransformNDIterable(
std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype,
ElementwiseClosure<Arity, void*> closure, Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<
ElementwiseInputTransformNDIterable<Arity - 1>>(
ArenaAllocator<>(arena), std::move(inputs), output_dtype, closure);
}
#define TENSORSTORE_INTERNAL_DO_INSTANTIATE(Arity) \
template NDIterable::Ptr GetElementwiseInputTransformNDIterable<Arity>( \
std::array<NDIterable::Ptr, Arity - 1> inputs, DataType output_dtype, \
ElementwiseClosure<Arity, void*> closure, Arena * arena);
TENSORSTORE_INTERNAL_DO_INSTANTIATE(1)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(2)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(3)
TENSORSTORE_INTERNAL_DO_INSTANTIATE(4)
#undef TENSORSTORE_INTERNAL_DO_INSTANTIATE
}
} | #include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include <new>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::NDIterableCopier;
using ::testing::_;
using ::testing::Pair;
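// Applies `func` elementwise to the source arrays and copies the result into
// dest_array through an NDIterableCopier.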
template <typename Func, typename DestArray, typename... SourceArray>
absl::Status TestCopy(Func func, tensorstore::IterationConstraints constraints,
DestArray dest_array, SourceArray... source_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<sizeof...(SourceArray) + 1, void*>
closure = tensorstore::internal::SimpleElementwiseFunction<
Func(typename SourceArray::Element..., typename DestArray::Element),
void*>::Closure(&func);
auto iterable = tensorstore::internal::GetElementwiseInputTransformNDIterable(
{{tensorstore::internal::GetTransformedArrayNDIterable(source_array,
&arena)
.value()...}},
tensorstore::dtype_v<typename DestArray::Element>, closure, &arena);
return NDIterableCopier(*iterable,
*tensorstore::internal::GetTransformedArrayNDIterable(
dest_array, &arena)
.value(),
dest_array.shape(), constraints, &arena)
.Copy();
}
TEST(NDIterableElementwiseInputTransformTest, Nullary) {
auto dest = tensorstore::AllocateArray<double>({2, 3});
TENSORSTORE_EXPECT_OK(TestCopy([](double* dest, void* arg) { *dest = 42.0; },
{}, dest));
EXPECT_EQ(
tensorstore::MakeArray<double>({{42.0, 42.0, 42.0}, {42.0, 42.0, 42.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Unary) {
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest = tensorstore::AllocateArray<double>(source.shape());
TENSORSTORE_EXPECT_OK(TestCopy(
[](const int* source, double* dest, void* arg) { *dest = -*source; },
{}, dest, source));
EXPECT_EQ(
tensorstore::MakeArray<double>({{-1.0, -2.0, -3.0}, {-4.0, -5.0, -6.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Binary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(TestCopy([](const int* a, const int* b, double* dest,
void* arg) { *dest = 2.0 * *a + *b; },
{}, dest, a, b));
EXPECT_EQ(
tensorstore::MakeArray<double>({{12.0, 16.0, 20.0}, {24.0, 28.0, 32.0}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, Ternary) {
auto a = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::MakeArray<int>({{10, 12, 14}, {16, 18, 20}});
auto c = tensorstore::MakeArray<double>({{1, -1, 1}, {-1, -1, 1}});
auto dest = tensorstore::AllocateArray<double>(a.shape());
TENSORSTORE_EXPECT_OK(
TestCopy([](const int* a, const int* b, const double* c, double* dest,
void* arg) { *dest = *a + *b * *c; },
{}, dest, a, b, c));
EXPECT_EQ(
tensorstore::MakeArray<double>({{1 + 10 * 1, 2 + 12 * -1, 3 + 14 * 1},
{4 + 16 * -1, 5 + 18 * -1, 6 + 20 * 1}}),
dest);
}
TEST(NDIterableElementwiseInputTransformTest, PartialCopy) {
auto source = tensorstore::MakeArray<int>({1, 2, 3, 0, 5, 6});
auto dest = tensorstore::AllocateArray<double>(
source.shape(), tensorstore::c_order, tensorstore::value_init);
EXPECT_THAT(TestCopy(
[](const int* source, double* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 0) {
*status = absl::UnknownError("zero");
return false;
}
*dest = -*source;
return true;
},
tensorstore::c_order, dest, source),
MatchesStatus(absl::StatusCode::kUnknown, "zero"));
EXPECT_EQ(tensorstore::MakeArray<double>({-1.0, -2.0, -3.0, 0.0, 0.0, 0.0}),
dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_input_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_elementwise_input_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a6f2f6b4-003f-41a7-9531-8c33c0835c28 | cpp | google/tensorstore | grid_chunk_key_ranges_base10 | tensorstore/internal/grid_chunk_key_ranges_base10.cc | tensorstore/internal/grid_chunk_key_ranges_base10_test.cc | #include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include <string>
#include <string_view>
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
std::string Base10LexicographicalGridIndexKeyParser::FormatKey(
tensorstore::span<const Index> grid_indices) const {
if (rank == 0) return "0";
std::string key;
FormatGridIndexKeyWithDimensionSeparator(
key, dimension_separator,
[](std::string& out, DimensionIndex dim, Index grid_index) {
absl::StrAppend(&out, grid_index);
},
rank, grid_indices);
return key;
}
bool Base10LexicographicalGridIndexKeyParser::ParseKey(
std::string_view key, tensorstore::span<Index> grid_indices) const {
return ParseGridIndexKeyWithDimensionSeparator(
dimension_separator,
[](std::string_view part, DimensionIndex dim, Index& grid_index) {
if (part.empty() || !absl::ascii_isdigit(part.front()) ||
!absl::ascii_isdigit(part.back()) ||
!absl::SimpleAtoi(part, &grid_index)) {
return false;
}
return true;
},
key, grid_indices);
}
Index Base10LexicographicalGridIndexKeyParser::
MinGridIndexForLexicographicalOrder(DimensionIndex dim,
IndexInterval grid_interval) const {
return MinValueWithMaxBase10Digits(grid_interval.exclusive_max());
}
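// Smallest non-negative value whose base-10 representation has as many digits
// as `exclusive_max - 1`; smaller indices would not sort lexicographically in
// numeric order.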
Index MinValueWithMaxBase10Digits(Index exclusive_max) {
if (exclusive_max <= 10) {
return 0;
}
Index min_value = 10;
while (min_value * 10 < exclusive_max) {
min_value *= 10;
}
return min_value;
}
}
} | #include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser;
using ::tensorstore::internal::MinValueWithMaxBase10Digits;
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank0) {
Base10LexicographicalGridIndexKeyParser parser(0, '/');
EXPECT_THAT(parser.FormatKey({}), "0");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank1) {
Base10LexicographicalGridIndexKeyParser parser(1, '/');
EXPECT_THAT(parser.FormatKey({{2}}), "2");
EXPECT_THAT(parser.FormatKey({}), "");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, FormatKeyRank2) {
Base10LexicographicalGridIndexKeyParser parser(2, '/');
EXPECT_THAT(parser.FormatKey({{2, 3}}), "2/3");
EXPECT_THAT(parser.FormatKey({{2}}), "2/");
EXPECT_THAT(parser.FormatKey({}), "");
}
TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank1) {
Base10LexicographicalGridIndexKeyParser parser(1, '/');
Index indices[1];
EXPECT_TRUE(parser.ParseKey("2", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(2));
EXPECT_FALSE(parser.ParseKey("", indices));
EXPECT_FALSE(parser.ParseKey("-1", indices));
EXPECT_FALSE(parser.ParseKey("a", indices));
EXPECT_FALSE(parser.ParseKey("2/3", indices));
EXPECT_FALSE(parser.ParseKey("2/", indices));
}
TEST(Base10LexicographicalGridIndexKeyParserTest, ParseKeyRank2) {
Base10LexicographicalGridIndexKeyParser parser(2, '/');
Index indices[2];
EXPECT_TRUE(parser.ParseKey("2/3", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(2, 3));
EXPECT_TRUE(parser.ParseKey("212/335", indices));
EXPECT_THAT(indices, ::testing::ElementsAre(212, 335));
EXPECT_FALSE(parser.ParseKey("1", indices));
EXPECT_FALSE(parser.ParseKey("", indices));
EXPECT_FALSE(parser.ParseKey("1/2/3", indices));
EXPECT_FALSE(parser.ParseKey("1/2/", indices));
}
TEST(Base10LexicographicalGridIndexKeyParserTest,
MinGridIndexForLexicographicalOrder) {
Base10LexicographicalGridIndexKeyParser parser(2, '/');
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 9)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 10)),
0);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 11)),
10);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 100)),
10);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 101)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 999)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1000)),
100);
EXPECT_THAT(parser.MinGridIndexForLexicographicalOrder(
0, IndexInterval::UncheckedHalfOpen(0, 1001)),
1000);
}
TEST(MinValueWithMaxBase10DigitsTest, Basic) {
EXPECT_EQ(0, MinValueWithMaxBase10Digits(0));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(1));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(9));
EXPECT_EQ(0, MinValueWithMaxBase10Digits(10));
EXPECT_EQ(10, MinValueWithMaxBase10Digits(11));
EXPECT_EQ(10, MinValueWithMaxBase10Digits(100));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(101));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(999));
EXPECT_EQ(100, MinValueWithMaxBase10Digits(1000));
EXPECT_EQ(1000, MinValueWithMaxBase10Digits(1001));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges_base10.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges_base10_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1a3554d7-3320-47c5-846e-3dba1ae4068b | cpp | google/tensorstore | masked_array | tensorstore/internal/masked_array.cc | tensorstore/internal/masked_array_test.cc | #include "tensorstore/internal/masked_array.h"
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
struct SetMask {
void operator()(bool* x, void*) const { *x = true; }
};
struct SetMaskAndCountChanged {
Index num_changed = 0;
void operator()(bool* x) {
if (!*x) {
++num_changed;
*x = true;
}
}
};
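// True iff the union of `a` and `b` is itself a hyperrectangle, determined by
// comparing element counts via inclusion-exclusion.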
bool IsHullEqualToUnion(BoxView<> a, BoxView<> b) {
assert(a.rank() == b.rank());
Index hull_num_elements = 1, a_num_elements = 1, b_num_elements = 1,
intersection_num_elements = 1;
for (DimensionIndex i = 0; i < a.rank(); ++i) {
IndexInterval a_interval = a[i], b_interval = b[i];
IndexInterval hull = Hull(a_interval, b_interval);
IndexInterval intersection = Intersect(a_interval, b_interval);
hull_num_elements *= hull.size();
a_num_elements *= a_interval.size();
b_num_elements *= b_interval.size();
intersection_num_elements *= intersection.size();
}
return (hull_num_elements ==
a_num_elements + b_num_elements - intersection_num_elements);
}
void Hull(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Hull(a[i], b[i]);
}
}
void Intersect(BoxView<> a, BoxView<> b, MutableBoxView<> out) {
const DimensionIndex rank = out.rank();
assert(a.rank() == rank && b.rank() == rank);
for (DimensionIndex i = 0; i < rank; ++i) {
out[i] = Intersect(a[i], b[i]);
}
}
Index GetRelativeOffset(tensorstore::span<const Index> base,
tensorstore::span<const Index> position,
tensorstore::span<const Index> strides) {
const DimensionIndex rank = base.size();
assert(rank == position.size());
assert(rank == strides.size());
Index result = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
result = internal::wrap_on_overflow::Add(
result, internal::wrap_on_overflow::Multiply(
strides[i], internal::wrap_on_overflow::Subtract(
position[i], base[i])));
}
return result;
}
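// Once every position within `region` is masked, the explicit mask array is
// redundant and can be freed.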
void RemoveMaskArrayIfNotNeeded(MaskData* mask) {
if (mask->num_masked_elements == mask->region.num_elements()) {
mask->mask_array.reset();
}
}
}
MaskData::MaskData(DimensionIndex rank) : region(rank) {
region.Fill(IndexInterval::UncheckedSized(0, 0));
}
std::unique_ptr<bool[], FreeDeleter> CreateMaskArray(
BoxView<> box, BoxView<> mask_region,
tensorstore::span<const Index> byte_strides) {
std::unique_ptr<bool[], FreeDeleter> result(
static_cast<bool*>(std::calloc(box.num_elements(), sizeof(bool))));
ByteStridedPointer<bool> start = result.get();
start += GetRelativeOffset(box.origin(), mask_region.origin(), byte_strides);
internal::IterateOverArrays(
internal::SimpleElementwiseFunction<SetMask(bool), void*>{},
nullptr,
skip_repeated_elements,
ArrayView<bool>(start.get(),
StridedLayoutView<>(mask_region.shape(), byte_strides)));
return result;
}
void CreateMaskArrayFromRegion(BoxView<> box, MaskData* mask,
tensorstore::span<const Index> byte_strides) {
assert(mask->num_masked_elements == mask->region.num_elements());
mask->mask_array = CreateMaskArray(box, mask->region, byte_strides);
}
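// Merges `*mask_b` into `*mask_a`. Keeps the cheap region-only representation
// when the combined mask is still a hyperrectangle; otherwise materializes an
// explicit mask array.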
void UnionMasks(BoxView<> box, MaskData* mask_a, MaskData* mask_b) {
assert(mask_a != mask_b);
if (mask_a->num_masked_elements == 0) {
std::swap(*mask_a, *mask_b);
return;
} else if (mask_b->num_masked_elements == 0) {
return;
}
const DimensionIndex rank = box.rank();
assert(mask_a->region.rank() == rank);
assert(mask_b->region.rank() == rank);
if (mask_a->mask_array && mask_b->mask_array) {
const Index size = box.num_elements();
mask_a->num_masked_elements = 0;
for (Index i = 0; i < size; ++i) {
if ((mask_a->mask_array[i] |= mask_b->mask_array[i])) {
++mask_a->num_masked_elements;
}
}
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
return;
}
if (!mask_a->mask_array && !mask_b->mask_array) {
if (IsHullEqualToUnion(mask_a->region, mask_b->region)) {
Hull(mask_a->region, mask_b->region, mask_a->region);
mask_a->num_masked_elements = mask_a->region.num_elements();
return;
}
} else if (!mask_a->mask_array) {
std::swap(*mask_a, *mask_b);
}
Index byte_strides[kMaxRank];
const tensorstore::span<Index> byte_strides_span(&byte_strides[0], rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
byte_strides_span);
if (!mask_a->mask_array) {
CreateMaskArrayFromRegion(box, mask_a, byte_strides_span);
}
ByteStridedPointer<bool> start = mask_a->mask_array.get();
start += GetRelativeOffset(box.origin(), mask_b->region.origin(),
byte_strides_span);
IterateOverArrays(
[&](bool* ptr) {
if (!*ptr) ++mask_a->num_masked_elements;
*ptr = true;
},
{},
ArrayView<bool>(start.get(), StridedLayoutView<>(mask_b->region.shape(),
byte_strides_span)));
Hull(mask_a->region, mask_b->region, mask_a->region);
RemoveMaskArrayIfNotNeeded(mask_a);
}
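// Copies only the unmasked elements of `source` into `dest`, preserving the
// positions already written through the mask.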
void RebaseMaskedArray(BoxView<> box, ArrayView<const void> source,
ArrayView<void> dest, const MaskData& mask) {
assert(source.dtype() == dest.dtype());
assert(internal::RangesEqual(box.shape(), source.shape()));
assert(internal::RangesEqual(box.shape(), dest.shape()));
const Index num_elements = box.num_elements();
if (mask.num_masked_elements == num_elements) return;
DataType dtype = source.dtype();
if (mask.num_masked_elements == 0) {
[[maybe_unused]] const auto success = internal::IterateOverArrays(
{&dtype->copy_assign, nullptr},
nullptr, skip_repeated_elements, source, dest);
assert(success);
return;
}
Index mask_byte_strides_storage[kMaxRank];
const tensorstore::span<Index> mask_byte_strides(
&mask_byte_strides_storage[0], box.rank());
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), box.shape(),
mask_byte_strides);
std::unique_ptr<bool[], FreeDeleter> mask_owner;
bool* mask_array_ptr;
if (!mask.mask_array) {
mask_owner = CreateMaskArray(box, mask.region, mask_byte_strides);
mask_array_ptr = mask_owner.get();
} else {
mask_array_ptr = mask.mask_array.get();
}
ArrayView<const bool> mask_array(
mask_array_ptr, StridedLayoutView<>(box.shape(), mask_byte_strides));
[[maybe_unused]] const auto success = internal::IterateOverArrays(
{&dtype->copy_assign_unmasked, nullptr},
nullptr, skip_repeated_elements, source, dest, mask_array);
assert(success);
}
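// Records the positions selected by `input_to_output` in `*mask`. The
// bounding-box region alone suffices when it describes the update exactly;
// otherwise an explicit mask array is created and updated elementwise.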
void WriteToMask(MaskData* mask, BoxView<> output_box,
IndexTransformView<> input_to_output, Arena* arena) {
assert(input_to_output.output_rank() == output_box.rank());
if (input_to_output.domain().box().is_empty()) {
return;
}
const DimensionIndex output_rank = output_box.rank();
Box<dynamic_rank(kNumInlinedDims)> output_range(output_rank);
const bool range_is_exact =
GetOutputRange(input_to_output, output_range).value();
Intersect(output_range, output_box, output_range);
Index mask_byte_strides_storage[kMaxRank];
const tensorstore::span<Index> mask_byte_strides(
&mask_byte_strides_storage[0], output_rank);
ComputeStrides(ContiguousLayoutOrder::c, sizeof(bool), output_box.shape(),
mask_byte_strides);
StridedLayoutView<dynamic_rank, offset_origin> mask_layout(output_box,
mask_byte_strides);
const bool use_mask_array =
output_box.rank() != 0 &&
mask->num_masked_elements != output_box.num_elements() &&
(static_cast<bool>(mask->mask_array) ||
(!Contains(mask->region, output_range) &&
(!range_is_exact || !IsHullEqualToUnion(mask->region, output_range))));
if (use_mask_array && !mask->mask_array) {
CreateMaskArrayFromRegion(output_box, mask, mask_byte_strides);
}
Hull(mask->region, output_range, mask->region);
if (use_mask_array) {
auto mask_iterable =
GetTransformedArrayNDIterable(
ArrayView<Shared<bool>, dynamic_rank, offset_origin>(
AddByteOffset(
SharedElementPointer<bool>(
UnownedToShared(mask->mask_array.get())),
-IndexInnerProduct(output_box.origin(),
tensorstore::span(mask_byte_strides))),
mask_layout),
input_to_output, arena)
.value();
SetMaskAndCountChanged set_mask_context;
constexpr ElementwiseFunction<1> set_mask_func =
internal::SimpleElementwiseFunction<SetMaskAndCountChanged(bool)>();
auto status = internal::IterateOverNDIterables<1, true>(
input_to_output.input_shape(), skip_repeated_elements,
{{mask_iterable.get()}}, arena, {&set_mask_func, &set_mask_context});
mask->num_masked_elements += set_mask_context.num_changed;
status.IgnoreError();
assert(status.ok());
} else {
mask->num_masked_elements = mask->region.num_elements();
}
}
}
} | #include "tensorstore/internal/masked_array.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/masked_array_testutil.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::ArrayView;
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeArrayView;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::offset_origin;
using ::tensorstore::SharedArray;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::ElementCopyFunction;
using ::tensorstore::internal::MaskData;
using ::tensorstore::internal::SimpleElementwiseFunction;
class MaskedArrayTester {
public:
explicit MaskedArrayTester(BoxView<> box)
: box_(box),
mask_(box.rank()),
mask_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(bool), box.shape()) {}
ArrayView<const bool> mask_array() const {
if (!mask_.mask_array) return {};
return ArrayView<const bool>(mask_.mask_array.get(),
mask_layout_zero_origin_);
}
Index num_masked_elements() const { return mask_.num_masked_elements; }
BoxView<> mask_region() const { return mask_.region; }
const MaskData& mask() const { return mask_; }
BoxView<> domain() const { return box_; }
void Combine(MaskedArrayTester&& other) {
UnionMasks(box_, &mask_, &other.mask_);
}
void Reset() { mask_.Reset(); }
protected:
Box<> box_;
MaskData mask_;
StridedLayout<> mask_layout_zero_origin_;
};
template <typename T>
class MaskedArrayWriteTester : public MaskedArrayTester {
public:
explicit MaskedArrayWriteTester(BoxView<> box)
: MaskedArrayTester(box),
dest_(tensorstore::AllocateArray<T>(box, tensorstore::c_order,
tensorstore::value_init)),
dest_layout_zero_origin_(tensorstore::ContiguousLayoutOrder::c,
sizeof(T), box.shape()) {}
template <typename CopyFunc>
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source, CopyFunc&& copy_func) {
ElementCopyFunction copy_function =
SimpleElementwiseFunction<std::remove_reference_t<CopyFunc>(const T, T),
void*>();
return WriteToMaskedArray(dest_.byte_strided_origin_pointer().get(), &mask_,
dest_.domain(), dest_transform, source,
{&copy_function, &copy_func});
}
absl::Status Write(IndexTransformView<> dest_transform,
TransformedArray<const T> source) {
return Write(dest_transform, source,
[](const T* source, T* dest, void*) { *dest = *source; });
}
void Rebase(ArrayView<const T> source) {
RebaseMaskedArray(
box_, source,
tensorstore::ArrayOriginCast<tensorstore::zero_origin>(dest_).value(),
mask_);
}
IndexTransform<> transform() const {
return tensorstore::IdentityTransform(dest_.domain());
}
ArrayView<const T> dest_array() const {
return ArrayView<const T>(dest_.byte_strided_origin_pointer().get(),
dest_layout_zero_origin_);
}
private:
SharedArray<T, dynamic_rank, offset_origin> dest_;
StridedLayout<> dest_layout_zero_origin_;
};
TEST(MaskDataTest, Construct) {
MaskData mask(3);
EXPECT_FALSE(mask.mask_array);
EXPECT_EQ(0, mask.num_masked_elements);
EXPECT_EQ(0, mask.region.num_elements());
}
TEST(WriteToMaskedArrayTest, RankZero) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(tester.transform(), MakeScalarArray(5)));
EXPECT_EQ(1, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(5), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankZeroError) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
EXPECT_THAT(
tester.Write(
tester.transform(), MakeScalarArray(5),
[](const int* source, int* dest, void* status) { return false; }),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneNoElementsWritten) {
MaskedArrayWriteTester<int> tester{BoxView<>(0)};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).AddNew().SizedInterval(0, 0)).value(),
MakeArrayView(tensorstore::span<const int>{})));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeScalarArray(0), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOne) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {10})};
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({2}, {1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 0, 0, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(5, 2)).value(),
MakeArray({4, 5})));
EXPECT_EQ(5, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArrayView({0, 1, 2, 3, 4, 5, 0, 0, 0, 0}), tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(9, 2)).value(),
MakeArray({6, 7})));
EXPECT_EQ(7, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {9}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({0, 1, 1, 1, 1, 1, 0, 0, 1, 1}),
tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 2, 3, 4, 5, 0, 0, 6, 7}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOneStrided) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {8})};
auto input_to_output = IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({3})
.output_single_input_dimension(0, -2, 2, 0)
.Finalize()
.value();
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).SizedInterval(2, 3, 2).TranslateTo(0))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(MakeArray<bool>({0, 1, 0, 1, 0, 1, 0, 0}), tester.mask_array());
EXPECT_EQ(MakeArray({0, 1, 0, 2, 0, 3, 0, 0}), tester.dest_array());
EXPECT_EQ(BoxView({2}, {5}), tester.mask_region());
}
TEST(WriteToMaskedArrayTest, RankTwo) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 2}, {3, 2}))
.value(),
MakeArray({
{7, 8},
{9, 0},
{1, 2},
})));
EXPECT_EQ(9, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 0, 0},
{1, 2, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({3, 5}, {2, 2}))
.value(),
MakeArray({
{5, 6},
{7, 8},
})));
EXPECT_EQ(13, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 2}, {3, 5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({
{0, 0, 0, 0, 0},
{1, 1, 1, 0, 0},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
}),
tester.mask_array());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{7, 8, 2, 0, 0},
{9, 0, 4, 5, 6},
{1, 2, 6, 7, 8},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoNonExactContainedInExistingMaskRegion) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 1, 2, 0, 0},
{0, 3, 4, 0, 0},
{0, 5, 6, 0, 0},
}),
tester.dest_array());
TENSORSTORE_EXPECT_OK(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {2, 2}, {2, 1}))
.value(),
MakeArray({
{7, 8},
{9, 0},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {3, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0, 0, 0},
{0, 7, 8, 0, 0},
{0, 3, 4, 0, 0},
{0, 9, 0, 0, 0},
}),
tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankTwoPartialCopy) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
EXPECT_THAT(
tester.Write((tester.transform() |
Dims(0, 1).TranslateSizedInterval({2, 3}, {3, 2}))
.value(),
MakeArray({
{1, 2},
{3, 4},
{5, 6},
}),
[](const int* source, int* dest, void* arg) {
if (*source == 4) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankTwoIndexArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {4, 5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({1, 2, 3})));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2, 0, 0},
{0, 3, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 3},
{1, 4},
{2, 3},
})))
.value(),
MakeArray({4, 5, 6})));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {4, 5}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 5, 0, 0},
{0, 6, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 1, 1, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
tester.mask_array());
}
TEST(WriteToMaskedArrayTest, IndexArrayLarge) {
const Index kSize = 32768;
auto index_array = tensorstore::AllocateArray<Index>({kSize});
for (Index i = 0; i < kSize; ++i) {
index_array(i) = i;
}
auto fill_array =
tensorstore::BroadcastArray(tensorstore::MakeScalarArray<int>(42),
tensorstore::span<const Index>({2, kSize}))
.value();
auto mask_array =
tensorstore::BroadcastArray(tensorstore::MakeScalarArray<bool>(true),
tensorstore::span<const Index>({2, kSize}))
.value();
MaskedArrayWriteTester<int> tester{fill_array.domain()};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(1).OuterIndexArraySlice(index_array)).value(),
fill_array));
EXPECT_EQ(fill_array.num_elements(), tester.num_masked_elements());
EXPECT_EQ(fill_array.domain(), tester.mask_region());
EXPECT_EQ(fill_array, tester.dest_array());
EXPECT_EQ(mask_array, tester.mask_array());
}
TEST(WriteToMaskedArrayTest, RankOneInvalidTransform) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {4})};
EXPECT_THAT(
tester.Write((tester.transform() | Dims(0).SizedInterval(2, 3)).value(),
MakeOffsetArray({1}, {1, 2, 3})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({0, 0, 0, 0}), tester.dest_array());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyDefaultError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
if (*source == 2) return false;
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "Data conversion failure."));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(WriteToMaskedArrayTest, RankOnePartialCopyCustomError) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
EXPECT_THAT(
tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3}),
[](const int* source, int* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 2) {
*status = absl::UnknownError("My custom error");
return false;
}
*dest = *source;
return true;
}),
MatchesStatus(absl::StatusCode::kUnknown, "My custom error"));
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(RebaseMaskedArrayTest, Empty) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
tester.Rebase(MakeArray({
{1, 2, 3},
{4, 5, 6},
}));
EXPECT_EQ(0, tester.num_masked_elements());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, Full) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({1, 2}, {2, 3}))
.value(),
MakeArray({
{1, 2, 3},
{4, 5, 6},
})));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{7, 7, 7},
{7, 7, 7},
}));
EXPECT_EQ(6, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{1, 2, 3},
{4, 5, 6},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).TranslateSizedInterval({2, 3}, {1, 2}))
.value(),
MakeArray({
{1, 2},
})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{0, 0, 0},
{0, 1, 2},
}),
tester.dest_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({2, 3}, {1, 2}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(MakeArray({
{3, 4, 5},
{6, 1, 2},
}),
tester.dest_array());
}
TEST(RebaseMaskedArrayTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1, 2}, {2, 3})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0, 1).IndexVectorArraySlice(MakeArray<Index>({
{1, 2},
{1, 4},
})))
.value(),
MakeArray({1, 2})));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 0, 2},
{0, 0, 0},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
tester.Rebase(MakeArray({
{3, 4, 5},
{6, 7, 8},
}));
EXPECT_EQ(2, tester.num_masked_elements());
EXPECT_EQ(BoxView({1, 2}, {2, 3}), tester.mask_region());
EXPECT_EQ(MakeArray({
{1, 4, 2},
{6, 7, 8},
}),
tester.dest_array());
EXPECT_EQ(MakeArray<bool>({
{1, 0, 1},
{0, 0, 0},
}),
tester.mask_array());
}
TEST(UnionMasksTest, FirstEmpty) {
MaskedArrayTester tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, SecondEmpty) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayTester tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(2, 3)).value(),
MakeArray({1, 2, 3})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({2}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 3})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).IndexArraySlice(MakeArray<Index>({1, 4})))
.value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 0}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {4}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(2, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
TEST(UnionMasksTest, NoMaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 1, 0, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, NoMaskArrayAndMaskArrayEqualsMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(4, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {5}), tester.mask_region());
EXPECT_EQ(MakeArray<bool>({1, 0, 1, 1, 1}), tester.mask_array());
}
TEST(UnionMasksTest, MaskArrayAndNoMaskArrayEqualsNoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
MaskedArrayWriteTester<int> tester_b{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
TENSORSTORE_EXPECT_OK(tester_b.Write(
(tester_b.transform() | Dims(0).TranslateSizedInterval(1, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester_b.mask_array().valid());
tester.Combine(std::move(tester_b));
EXPECT_EQ(3, tester.num_masked_elements());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_FALSE(tester.mask_array().valid());
}
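// The UnionMasks cases above pin down the combine rule: if the union of the
// two masked element sets exactly fills the hull of the two regions (the
// masked-element count equals the hull volume), the result is represented by
// the region box alone; otherwise a boolean mask array over the full domain
// is materialized. A minimal 1-d sketch of the second case, matching
// NoMaskArrayAndNoMaskArrayEqualsMaskArray:
//   mask A: region [1, 3) -> elements {1, 2}
//   mask B: region [4, 6) -> elements {4, 5}
//   hull [1, 6) has 5 elements but only 4 are masked, so the result is
//   mask_array = {1, 1, 0, 1, 1} with mask_region covering the whole domain.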
TEST(ResetTest, NoMaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(4, 2)).value(),
MakeArray({1, 2})));
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_EQ(BoxView({4}, {2}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
TEST(ResetTest, MaskArray) {
MaskedArrayWriteTester<int> tester{BoxView({1}, {5})};
TENSORSTORE_EXPECT_OK(tester.Write(
(tester.transform() | Dims(0).TranslateSizedInterval(1, 2, 2)).value(),
MakeArray({1, 2})));
EXPECT_TRUE(tester.mask_array().valid());
EXPECT_EQ(BoxView({1}, {3}), tester.mask_region());
EXPECT_EQ(2, tester.num_masked_elements());
tester.Reset();
EXPECT_FALSE(tester.mask_array().valid());
EXPECT_TRUE(tester.mask_region().is_empty());
EXPECT_EQ(0, tester.num_masked_elements());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/masked_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/masked_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8260e578-71f7-49bc-94f4-3c2cd85c6dbc | cpp | google/tensorstore | async_write_array | tensorstore/internal/async_write_array.cc | tensorstore/internal/async_write_array_test.cc | #include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/masked_array.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
Index AsyncWriteArray::Spec::GetNumInBoundsElements(BoxView<> domain) const {
const DimensionIndex rank = this->rank();
assert(domain.rank() == rank);
Index product = 1;
const BoxView<> bounds = this->valid_data_bounds;
for (DimensionIndex i = 0; i < rank; ++i) {
product *= Intersect(bounds[i], domain[i]).size();
}
return product;
}
SharedArrayView<const void> AsyncWriteArray::Spec::GetFillValueForDomain(
BoxView<> domain) const {
const DimensionIndex rank = domain.rank();
assert(Contains(overall_fill_value.domain(), domain));
return SharedArrayView<const void>(
AddByteOffset(
overall_fill_value.element_pointer(),
IndexInnerProduct(rank, overall_fill_value.byte_strides().data(),
domain.origin().data())),
StridedLayoutView<>(rank, domain.shape().data(),
overall_fill_value.byte_strides().data()));
}
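// Worked example for the aliasing view constructed above, using the fill
// value from the unit tests: overall_fill_value has origin {-2, 0}, int32
// elements, and row-major byte strides {36, 4}. For domain Box({0, 0},
// {2, 3}) the inner product contributes 36*0 + 4*0 = 0 bytes past the
// origin-adjusted element pointer, so element (0, 0) of the view is
// overall_fill_value(0, 0) == 21. No data is copied: the view shares the
// fill value's buffer, restricted to the domain's shape.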
Result<NDIterable::Ptr> AsyncWriteArray::Spec::GetReadNDIterable(
SharedArrayView<const void> array, BoxView<> domain,
IndexTransform<> chunk_transform, Arena* arena) const {
if (!array.valid()) array = GetFillValueForDomain(domain);
assert(internal::RangesEqual(array.shape(), domain.shape()));
StridedLayoutView<dynamic_rank, offset_origin> data_layout(
domain, array.byte_strides());
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(
{AddByteOffset(std::move(array.element_pointer()),
-data_layout.origin_byte_offset()),
std::move(chunk_transform)},
arena);
}
AsyncWriteArray::MaskedArray::MaskedArray(DimensionIndex rank) : mask(rank) {}
void AsyncWriteArray::MaskedArray::WriteFillValue(const Spec& spec,
BoxView<> domain) {
array = {};
mask.Reset();
mask.num_masked_elements = domain.num_elements();
mask.region = domain;
}
AsyncWriteArray::WritebackData
AsyncWriteArray::MaskedArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
bool read_state_already_integrated) {
assert(domain.rank() == spec.rank());
const auto must_store = [&](ArrayView<const void> array) {
if (spec.store_if_equal_to_fill_value) return true;
return !AreArraysEqual(array, spec.GetFillValueForDomain(domain),
spec.fill_value_comparison_kind);
};
const auto get_writeback_from_array = [&] {
WritebackData writeback;
writeback.array = array;
writeback.must_store = must_store(writeback.array);
if (!writeback.must_store) {
array = {};
writeback.array = spec.GetFillValueForDomain(domain);
writeback.may_retain_reference_to_array_indefinitely = true;
} else {
writeback.may_retain_reference_to_array_indefinitely =
(array_capabilities <= kImmutableAndCanRetainIndefinitely);
}
return writeback;
};
if (!array.valid()) {
if (IsFullyOverwritten(spec, domain)) {
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (IsUnmodified()) {
WritebackData writeback;
writeback.must_store = read_array.valid() && must_store(read_array);
if (writeback.must_store) {
writeback.array = read_array;
} else {
writeback.array = spec.GetFillValueForDomain(domain);
}
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated && read_array.valid()) {
array_capabilities = kMutableArray;
array = tensorstore::MakeCopy(spec.GetFillValueForDomain(domain),
{c_order, include_repeated_elements});
RebaseMaskedArray(domain, ArrayView<const void>(read_array), array, mask);
return get_writeback_from_array();
}
WritebackData writeback;
writeback.array = spec.GetFillValueForDomain(domain);
writeback.must_store = false;
writeback.may_retain_reference_to_array_indefinitely = true;
return writeback;
}
if (!read_state_already_integrated &&
mask.num_masked_elements != domain.num_elements()) {
EnsureWritable(spec);
RebaseMaskedArray(
domain,
read_array.valid()
? ArrayView<const void>(read_array)
: ArrayView<const void>(spec.GetFillValueForDomain(domain)),
array, mask);
}
return get_writeback_from_array();
}
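// Decision summary for the writeback logic above, as exercised by the unit
// tests:
//   - no write array, fully overwritten
//       -> fill value, must_store == false
//   - no write array, unmodified
//       -> read_array if it differs from the fill value, else the fill value
//   - no write array, partial mask, read data available
//       -> copy the fill value, rebase the read data into it, then compare
//   - no write array, partial mask, no read data
//       -> fill value, must_store == false
//   - write array, partial mask
//       -> rebase read data (or fill value) into the array, then compare
// In each comparing branch the array is dropped in favor of the fill value
// when the contents are equal (unless store_if_equal_to_fill_value is set).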
size_t AsyncWriteArray::MaskedArray::EstimateSizeInBytes(
const Spec& spec, tensorstore::span<const Index> shape) const {
size_t total = 0;
if (array.valid()) {
total += GetByteExtent(array);
}
if (mask.mask_array) {
const Index num_elements = ProductOfExtents(shape);
total += num_elements * sizeof(bool);
}
return total;
}
void AsyncWriteArray::MaskedArray::EnsureWritable(const Spec& spec) {
assert(array.valid());
auto new_array =
tensorstore::AllocateArray(array.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
CopyArray(array, new_array);
array = std::move(new_array);
array_capabilities = kMutableArray;
}
Result<TransformedSharedArray<void>>
AsyncWriteArray::MaskedArray::GetWritableTransformedArray(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform) {
if (!array.valid()) {
this->array =
tensorstore::AllocateArray(domain.shape(), tensorstore::c_order,
tensorstore::default_init, spec.dtype());
array_capabilities = kMutableArray;
if (IsFullyOverwritten(spec, domain)) {
CopyArray(spec.GetFillValueForDomain(domain), this->array);
} else {
assert(IsUnmodified());
}
} else if (array_capabilities != kMutableArray) {
EnsureWritable(spec);
}
StridedLayoutView<dynamic_rank, offset_origin> data_layout{
domain, this->array.byte_strides()};
TENSORSTORE_ASSIGN_OR_RETURN(
chunk_transform,
ComposeLayoutAndTransform(data_layout, std::move(chunk_transform)));
return {std::in_place,
UnownedToShared(
AddByteOffset(ElementPointer<void>(this->array.element_pointer()),
-data_layout.origin_byte_offset())),
std::move(chunk_transform)};
}
Result<NDIterable::Ptr> AsyncWriteArray::MaskedArray::BeginWrite(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_array,
GetWritableTransformedArray(spec, domain, std::move(chunk_transform)));
return GetTransformedArrayNDIterable(std::move(transformed_array), arena);
}
void AsyncWriteArray::MaskedArray::EndWrite(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
Arena* arena) {
WriteToMask(&mask, domain, chunk_transform, arena);
}
void AsyncWriteArray::MaskedArray::Clear() {
mask.Reset();
array = {};
}
AsyncWriteArray::AsyncWriteArray(DimensionIndex rank) : write_state(rank) {}
AsyncWriteArray::WritebackData AsyncWriteArray::GetArrayForWriteback(
const Spec& spec, BoxView<> domain,
const SharedArrayView<const void>& read_array,
const StorageGeneration& read_generation) {
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array, read_generation == this->read_generation);
if (write_state.array.valid()) this->read_generation = read_generation;
return writeback_data;
}
Result<NDIterable::Ptr> AsyncWriteArray::GetReadNDIterable(
const Spec& spec, BoxView<> domain, SharedArrayView<const void> read_array,
const StorageGeneration& read_generation, IndexTransform<> chunk_transform,
Arena* arena) {
if (!read_array.valid()) read_array = spec.GetFillValueForDomain(domain);
if (!write_state.IsUnmodified()) {
if (write_state.IsFullyOverwritten(spec, domain)) {
if (!write_state.array.valid()) {
read_array = spec.GetFillValueForDomain(domain);
}
} else if (this->read_generation != read_generation) {
assert(write_state.array.valid());
if (write_state.array_capabilities != MaskedArray::kMutableArray) {
write_state.EnsureWritable(spec);
}
RebaseMaskedArray(domain, read_array, write_state.array,
write_state.mask);
this->read_generation = read_generation;
}
if (write_state.array.valid()) {
read_array = write_state.array;
}
}
return spec.GetReadNDIterable(std::move(read_array), domain,
std::move(chunk_transform), arena);
}
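// Read-path summary for the method above: an unmodified chunk reads straight
// from read_array (or the fill value); a fully overwritten chunk with no
// write array reads the fill value; otherwise, if read_generation changed,
// the fresh read data is first rebased into the (writable) write array, and
// the write array is then served. In every case the final NDIterable comes
// from Spec::GetReadNDIterable over the selected array.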
namespace {
bool ZeroCopyToWriteArray(
const AsyncWriteArray::Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform,
TransformedSharedArray<const void> source_array,
AsyncWriteArray::WriteArraySourceCapabilities source_capabilities,
AsyncWriteArray::MaskedArray& write_state) {
assert(source_capabilities !=
AsyncWriteArray::WriteArraySourceCapabilities::kCannotRetain);
const DimensionIndex dest_rank = domain.rank();
assert(spec.rank() == dest_rank);
assert(chunk_transform.output_rank() == dest_rank);
IndexTransformView<> source_transform = source_array.transform();
const DimensionIndex input_rank = chunk_transform.input_rank();
assert(source_transform.input_rank() == input_rank);
assert(source_transform.domain().box() == chunk_transform.domain().box());
Index new_byte_strides[kMaxRank];
DimensionIndex dest_dim_for_input_dim[kMaxRank];
std::fill_n(dest_dim_for_input_dim, input_rank, DimensionIndex(-1));
std::fill_n(new_byte_strides, dest_rank, Index(0));
for (DimensionIndex dest_dim = 0; dest_dim < dest_rank; ++dest_dim) {
if (domain.shape()[dest_dim] == 1) continue;
auto map = chunk_transform.output_index_map(dest_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) {
continue;
}
[[maybe_unused]] DimensionIndex prev_dest_dim =
std::exchange(dest_dim_for_input_dim[map.input_dimension()], dest_dim);
assert(prev_dest_dim == -1);
}
const DimensionIndex source_output_rank = source_transform.output_rank();
Index source_offset = 0;
for (DimensionIndex source_output_dim = 0;
source_output_dim < source_output_rank; ++source_output_dim) {
auto map = source_transform.output_index_map(source_output_dim);
source_offset =
internal::wrap_on_overflow::Add(source_offset, map.offset());
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const DimensionIndex dest_dim = dest_dim_for_input_dim[input_dim];
const Index source_stride = map.stride();
if (dest_dim == -1) {
assert(source_transform.input_shape()[input_dim] == 1);
const Index source_origin =
source_transform.input_origin()[input_dim];
source_offset = internal::wrap_on_overflow::Add(
source_offset, internal::wrap_on_overflow::Multiply(
source_origin, source_stride));
break;
}
const auto dest_map = chunk_transform.output_index_map(dest_dim);
const Index dest_stride = dest_map.stride();
assert(dest_stride == 1 || dest_stride == -1);
new_byte_strides[dest_dim] = internal::wrap_on_overflow::Add(
new_byte_strides[dest_dim],
internal::wrap_on_overflow::Multiply(source_stride, dest_stride));
break;
}
case OutputIndexMethod::array:
return false;
}
}
for (DimensionIndex dest_dim = 0; dest_dim < dest_rank; ++dest_dim) {
auto map = chunk_transform.output_index_map(dest_dim);
source_offset = internal::wrap_on_overflow::Subtract(
source_offset, internal::wrap_on_overflow::Multiply(
new_byte_strides[dest_dim], map.offset()));
}
auto& new_array = write_state.array;
new_array.layout() =
StridedLayoutView<>(dest_rank, domain.shape().data(), new_byte_strides);
source_offset = internal::wrap_on_overflow::Add(
source_offset,
IndexInnerProduct(dest_rank, domain.origin().data(), new_byte_strides));
new_array.element_pointer() = AddByteOffset(
SharedElementPointer<void>(internal::const_pointer_cast<void>(std::move(
source_array.element_pointer().pointer())),
spec.dtype()),
source_offset);
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
using MaskedArray = AsyncWriteArray::MaskedArray;
switch (source_capabilities) {
case WriteArraySourceCapabilities::kCannotRetain:
ABSL_UNREACHABLE();
case WriteArraySourceCapabilities::kMutable:
write_state.array_capabilities = MaskedArray::kMutableArray;
break;
case WriteArraySourceCapabilities::kImmutableAndCanRetainIndefinitely:
write_state.array_capabilities =
MaskedArray::kImmutableAndCanRetainIndefinitely;
break;
case WriteArraySourceCapabilities::kImmutableAndCanRetainUntilCommit:
write_state.array_capabilities =
MaskedArray::kImmutableAndCanRetainUntilCommit;
break;
}
return true;
}
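// Worked example of the stride folding above, assuming an identity
// chunk_transform over domain Box({0, 0}, {2, 3}) and a C-order int32 source
// array: every source output dimension maps through a single input dimension
// to the matching dest dimension with dest_stride == 1, so new_byte_strides
// becomes {12, 4} (the source's own strides) and all offset terms cancel to
// source_offset == 0. The resulting write_state.array aliases the source
// buffer; array_capabilities merely records how long the reference may be
// retained.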
}  // namespace
absl::Status AsyncWriteArray::WriteArray(
const Spec& spec, BoxView<> domain, IndexTransformView<> chunk_transform,
absl::FunctionRef<Result<std::pair<TransformedSharedArray<const void>,
WriteArraySourceCapabilities>>()>
get_source_array) {
[[maybe_unused]] const DimensionIndex dest_rank = spec.rank();
assert(domain.rank() == dest_rank);
assert(chunk_transform.output_rank() == dest_rank);
Box<dynamic_rank(kMaxRank)> output_range(spec.rank());
TENSORSTORE_ASSIGN_OR_RETURN(
bool output_range_exact,
tensorstore::GetOutputRange(chunk_transform, output_range));
if (!output_range_exact || output_range != domain) {
return absl::CancelledError();
}
TENSORSTORE_ASSIGN_OR_RETURN(auto source_array_info, get_source_array());
auto source_capabilities = std::get<1>(source_array_info);
if (source_capabilities == WriteArraySourceCapabilities::kCannotRetain) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto dest_transformed_array,
write_state.GetWritableTransformedArray(spec, domain, chunk_transform));
TENSORSTORE_RETURN_IF_ERROR(CopyTransformedArray(
std::get<0>(source_array_info), dest_transformed_array));
} else {
if (!ZeroCopyToWriteArray(spec, domain, chunk_transform,
std::get<0>(source_array_info),
source_capabilities, write_state)) {
return absl::CancelledError();
}
}
write_state.mask.Reset();
write_state.mask.num_masked_elements = domain.num_elements();
write_state.mask.region = domain;
return absl::OkStatus();
}
Result<NDIterable::Ptr> AsyncWriteArray::BeginWrite(
const Spec& spec, BoxView<> domain, IndexTransform<> chunk_transform,
Arena* arena) {
return write_state.BeginWrite(spec, domain, std::move(chunk_transform),
arena);
}
void AsyncWriteArray::EndWrite(const Spec& spec, BoxView<> domain,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) {
if (!success) {
InvalidateReadState();
return;
}
write_state.EndWrite(spec, domain, chunk_transform, arena);
}
}  // namespace internal
}  // namespace tensorstore
} | #include "tensorstore/internal/async_write_array.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <random>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::ReferencesSameDataAs;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::AsyncWriteArray;
using MaskedArray = AsyncWriteArray::MaskedArray;
using Spec = AsyncWriteArray::Spec;
tensorstore::SharedArray<void> CopyNDIterable(
tensorstore::internal::NDIterable::Ptr source_iterable,
tensorstore::span<const Index> shape, Arena* arena) {
auto dest_array = tensorstore::AllocateArray(shape, tensorstore::c_order,
tensorstore::default_init,
source_iterable->dtype());
auto dest_iterable =
tensorstore::internal::GetArrayNDIterable(dest_array, arena);
tensorstore::internal::NDIterableCopier copier(*source_iterable,
*dest_iterable, shape, arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
return dest_array;
}
template <typename Target>
void TestWrite(Target* target, const Spec& spec, BoxView<> domain,
tensorstore::SharedOffsetArrayView<const void> source_array) {
Arena arena;
auto transform = tensorstore::IdentityTransform(source_array.domain());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto dest_iterable, target->BeginWrite(spec, domain, transform, &arena));
auto source_iterable =
tensorstore::internal::GetArrayNDIterable(source_array, &arena);
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *dest_iterable, source_array.shape(), &arena);
TENSORSTORE_EXPECT_OK(copier.Copy());
if constexpr (std::is_same_v<Target, AsyncWriteArray>) {
target->EndWrite(spec, domain, transform, true, &arena);
} else {
target->EndWrite(spec, domain, transform, &arena);
}
}
TEST(SpecTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
EXPECT_EQ(6, spec.GetNumInBoundsElements(BoxView<>({0, 0}, {2, 3})));
EXPECT_EQ(3, spec.GetNumInBoundsElements(BoxView<>({-2, 0}, {2, 3})));
EXPECT_EQ(2, spec.rank());
EXPECT_EQ(tensorstore::dtype_v<int32_t>, spec.dtype());
EXPECT_EQ(0, spec.EstimateReadStateSizeInBytes(
false, tensorstore::span<const Index>({2, 3})));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
spec.EstimateReadStateSizeInBytes(
true, tensorstore::span<const Index>({2, 3})));
{
auto read_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
spec.GetReadNDIterable(
read_array, BoxView<>({2, 6}, {2, 3}),
tensorstore::IdentityTransform(tensorstore::Box<>({2, 6}, {2, 2})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 8}, {10, 11}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 2}), &arena));
}
}
TEST(MaskedArrayTest, Basic) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
MaskedArray write_state(2);
Box<> domain({0, 0}, {2, 3});
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, fill_value_copy,
false);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, domain,
tensorstore::AllocateArray<int32_t>(
tensorstore::BoxView<>({1, 1}, {0, 0})));
EXPECT_TRUE(write_state.array.valid());
EXPECT_TRUE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * sizeof(int32_t),
write_state.EstimateSizeInBytes(spec, domain.shape()));
std::fill_n(static_cast<int32_t*>(write_state.array.data()),
domain.num_elements(), 0);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{0, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
EXPECT_EQ(MakeArray<int32_t>({{9, 0, 0}, {0, 7, 8}}),
write_state.shared_array_view(spec));
EXPECT_EQ(MakeArray<bool>({{1, 0, 0}, {0, 1, 1}}),
tensorstore::Array(write_state.mask.mask_array.get(), {2, 3}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.IsFullyOverwritten(spec, domain));
EXPECT_EQ(2 * 3 * (sizeof(int32_t) + sizeof(bool)),
write_state.EstimateSizeInBytes(spec, domain.shape()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, {},
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
true);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 7, 8}}),
writeback_data.array);
}
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{9, 12, 13}, {14, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}, {9}}));
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{10, 10, 10}}));
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{10, 10, 10}, {9, 7, 8}}),
writeback_data.array);
}
TestWrite(&write_state, spec, domain, fill_value_copy);
EXPECT_TRUE(write_state.array.valid());
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
write_state.Clear();
EXPECT_TRUE(write_state.IsUnmodified());
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
write_state.WriteFillValue(spec, domain);
EXPECT_FALSE(write_state.IsUnmodified());
EXPECT_FALSE(write_state.array.valid());
EXPECT_EQ(0, write_state.EstimateSizeInBytes(spec, domain.shape()));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, domain, read_array,
false);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(write_state.array.valid());
}
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1, 1}, {{7, 8}}));
EXPECT_EQ(MakeArray<int32_t>({{21, 22, 23}, {31, 7, 8}}),
write_state.shared_array_view(spec));
}
TEST(MaskedArrayTest, PartialChunk) {
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{-2, 0}, {2, 3}};
MaskedArray write_state(2);
TestWrite(&write_state, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({-1, 0}, {{7, 8, 9}}));
EXPECT_TRUE(write_state.IsFullyOverwritten(spec, domain));
}
TEST(MaskedArrayTest, StoreIfEqualToFillValue) {
auto overall_fill_value = MakeScalarArray<int32_t>(42);
tensorstore::Box<> component_bounds;
Spec spec{overall_fill_value, component_bounds};
spec.store_if_equal_to_fill_value = true;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {}, tensorstore::MakeScalarArray<int32_t>(42));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
auto read_array = MakeScalarArray<int32_t>(50);
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, read_array,
false);
EXPECT_EQ(overall_fill_value, writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
}
}
TEST(MaskedArrayTest, CompareFillValueIdenticallyEqual) {
auto fill_value =
MakeScalarArray<float>(std::numeric_limits<float>::quiet_NaN());
tensorstore::Box<> component_bounds;
Spec spec{fill_value, component_bounds};
spec.fill_value_comparison_kind =
tensorstore::EqualityComparisonKind::identical;
MaskedArray write_state(0);
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(AreArraysIdenticallyEqual(
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::signaling_NaN()),
writeback_data.array));
EXPECT_TRUE(writeback_data.must_store);
}
TestWrite(&write_state, spec, {},
tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()));
{
auto writeback_data = write_state.GetArrayForWriteback(
spec, {}, {},
false);
EXPECT_TRUE(
AreArraysIdenticallyEqual(tensorstore::MakeScalarArray<float>(
std::numeric_limits<float>::quiet_NaN()),
writeback_data.array));
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(fill_value.data(), writeback_data.array.data());
}
}
TEST(AsyncWriteArrayTest, Basic) {
AsyncWriteArray async_write_array(2);
auto overall_fill_value = MakeOffsetArray<int32_t>(
{-2, 0}, {
{1, 2, 3, 4, 5, 6, 7, 8, 9},
{11, 12, 13, 14, 15, 16, 17, 18, 19},
{21, 22, 23, 24, 25, 26, 27, 28, 29},
{31, 32, 33, 34, 35, 36, 37, 38, 39},
});
tensorstore::Box<> component_bounds({-1, -kInfIndex}, {3, kInfSize});
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0, 0}, {2, 3}};
auto fill_value_copy = MakeArray<int32_t>({{21, 22, 23}, {31, 32, 33}});
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 1}, {2, 2})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{22, 23}, {32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 2}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("b"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(read_array, writeback_data.array);
EXPECT_EQ(StorageGeneration::Invalid(), async_write_array.read_generation);
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{8}}));
{
auto* data_ptr = async_write_array.write_state.array.data();
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{7}}));
EXPECT_EQ(data_ptr, async_write_array.write_state.array.data());
}
{
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, {},
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("a"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
tensorstore::SharedArray<const void> prev_writeback_array;
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("a"));
EXPECT_TRUE(writeback_data.must_store);
prev_writeback_array = writeback_data.array;
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {31, 32, 33}}),
writeback_data.array);
}
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{7, 12, 13}, {14, 15, 16}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
{
auto read_array = MakeArray<int32_t>({{21, 22, 23}, {24, 25, 26}});
Arena arena;
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, read_array,
StorageGeneration::FromString("c"));
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(MakeArray<int32_t>({{7, 22, 23}, {24, 25, 26}}),
writeback_data.array);
EXPECT_EQ(StorageGeneration::FromString("c"),
async_write_array.read_generation);
EXPECT_NE(prev_writeback_array, writeback_data.array);
}
async_write_array.write_state.WriteFillValue(spec, domain);
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(fill_value_copy,
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({0, 0}, {{9}}));
{
auto read_array = MakeArray<int32_t>({{11, 12, 13}, {14, 15, 16}});
Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto iterable,
async_write_array.GetReadNDIterable(
spec, domain, read_array,
StorageGeneration::FromString("b"),
tensorstore::IdentityTransform(tensorstore::Box<>({0, 0}, {2, 3})),
&arena));
EXPECT_EQ(MakeArray<int32_t>({{9, 22, 23}, {31, 32, 33}}),
CopyNDIterable(std::move(iterable),
tensorstore::span<const Index>({2, 3}), &arena));
}
}
TEST(AsyncWriteArrayTest, Issue144) {
AsyncWriteArray async_write_array(1);
auto overall_fill_value = MakeArray<int32_t>({0, 0});
tensorstore::Box<> component_bounds(1);
Spec spec{overall_fill_value, component_bounds};
Box<> domain{{0}, {2}};
TestWrite(&async_write_array, spec, domain,
tensorstore::MakeOffsetArray<int32_t>({1}, {0}));
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("c"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
}
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
for (int i = 0; i < 2; ++i) {
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, {},
StorageGeneration::FromString("d"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({2, 2}),
StorageGeneration::FromString("e"));
EXPECT_EQ(MakeArray<int32_t>({2, 0}), writeback_data.array);
EXPECT_TRUE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_TRUE(async_write_array.write_state.array.data());
}
{
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, domain, MakeArray<int32_t>({0, 2}),
StorageGeneration::FromString("f"));
EXPECT_EQ(spec.GetFillValueForDomain(domain), writeback_data.array);
EXPECT_FALSE(writeback_data.must_store);
EXPECT_EQ(1, async_write_array.write_state.mask.num_masked_elements);
EXPECT_FALSE(async_write_array.write_state.array.data());
}
}
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
using ArrayCapabilities = AsyncWriteArray::MaskedArray::ArrayCapabilities;
void TestWriteArraySuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy, tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
SCOPED_TRACE(tensorstore::StrCat("chunk_transform=", chunk_transform));
AsyncWriteArray async_write_array(chunk_transform.output_rank());
tensorstore::Box<> output_range(chunk_transform.output_rank());
ASSERT_THAT(tensorstore::GetOutputRange(chunk_transform, output_range),
::testing::Optional(true));
auto origin = output_range.origin();
SCOPED_TRACE(tensorstore::StrCat("origin=", origin));
auto fill_value =
tensorstore::AllocateArray(output_range, tensorstore::c_order,
tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
size_t orig_use_count = source_array.element_pointer().pointer().use_count();
TENSORSTORE_ASSERT_OK(async_write_array.WriteArray(
spec, output_range, chunk_transform,
[&] { return std::pair{source_array, source_capabilities}; }));
auto validate_zero_copy = [&](const auto& target_array,
size_t orig_use_count) {
EXPECT_EQ((zero_copy ? orig_use_count + 1 : orig_use_count),
source_array.element_pointer().pointer().use_count());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_target_array,
target_array |
tensorstore::AllDims().TranslateTo(output_range.origin()) |
chunk_transform | tensorstore::TryConvertToArray());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_source_array,
source_array | tensorstore::TryConvertToArray());
EXPECT_THAT(
materialized_target_array,
::testing::Conditional(
zero_copy, ReferencesSameDataAs(materialized_source_array),
::testing::Not(ReferencesSameDataAs(materialized_source_array))));
};
{
SCOPED_TRACE(
"Checking async_write_array.write_state.array before calling "
"GetArrayForWriteback");
validate_zero_copy(async_write_array.write_state.array, orig_use_count);
}
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
{
SCOPED_TRACE("Checking writeback_data");
orig_use_count = source_array.element_pointer().pointer().use_count();
auto writeback_data = async_write_array.GetArrayForWriteback(
spec, output_range, {},
StorageGeneration::Invalid());
validate_zero_copy(writeback_data.array, orig_use_count);
EXPECT_EQ(may_retain_writeback,
writeback_data.may_retain_reference_to_array_indefinitely);
EXPECT_EQ(expected_array_capabilities,
async_write_array.write_state.array_capabilities);
}
}
absl::Status TestWriteArrayError(
WriteArraySourceCapabilities source_capabilities, tensorstore::Box<> box,
tensorstore::IndexTransformView<> chunk_transform,
tensorstore::TransformedSharedArray<const void> source_array) {
AsyncWriteArray async_write_array(chunk_transform.output_rank());
auto fill_value = tensorstore::AllocateArray(
box, tensorstore::c_order, tensorstore::value_init, source_array.dtype());
tensorstore::Box<> component_bounds(chunk_transform.output_rank());
Spec spec{fill_value, component_bounds};
return async_write_array.WriteArray(spec, box, chunk_transform, [&] {
return std::pair{source_array, source_capabilities};
});
}
void TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities source_capabilities,
ArrayCapabilities expected_array_capabilities, bool may_retain_writeback,
bool zero_copy) {
auto source_array = MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
auto chunk_transform = tensorstore::IdentityTransform(source_array.shape());
TestWriteArraySuccess(source_capabilities, expected_array_capabilities,
may_retain_writeback, zero_copy, chunk_transform,
source_array);
}
TEST(WriteArrayIdentityTransformSuccessTest, kCannotRetain) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kCannotRetain,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
false);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainIndefinitely) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainIndefinitely,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainIndefinitely,
true,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest,
kImmutableAndCanRetainUntilCommit) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kImmutableAndCanRetainUntilCommit,
AsyncWriteArray::MaskedArray::kImmutableAndCanRetainUntilCommit,
false,
true);
}
TEST(WriteArrayIdentityTransformSuccessTest, kMutable) {
TestWriteArrayIdentityTransformSuccess(
WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true);
}
TEST(WriteArrayNonIdentityTransformSuccess, kMutable) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_ASYNC_WRITE_ARRAY")};
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
constexpr size_t kNumIterations = 10;
for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
tensorstore::IndexTransform<> source_transform;
{
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
p;
p.max_stride = 2;
source_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IndexDomain<>(base_source_array.domain()), p);
}
SCOPED_TRACE(tensorstore::StrCat("source_transform=", source_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto source_array,
base_source_array | source_transform);
auto chunk_transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, source_array.domain());
TestWriteArraySuccess(WriteArraySourceCapabilities::kMutable,
AsyncWriteArray::MaskedArray::kMutableArray,
true,
true, chunk_transform, source_array);
}
}
TEST(WriteArrayErrorTest, SourceArrayIndexArrayMap) {
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto source_array,
base_source_array | tensorstore::Dims(1).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({1, 0, 1, 1, 2})));
auto chunk_transform = tensorstore::IdentityTransform(source_array.domain());
EXPECT_THAT(TestWriteArrayError(WriteArraySourceCapabilities::kMutable,
tensorstore::Box<>({2, 5}), chunk_transform,
source_array),
tensorstore::MatchesStatus(absl::StatusCode::kCancelled));
}
TEST(WriteArrayErrorTest, ChunkTransformIndexArrayMap) {
tensorstore::SharedArray<const void> base_source_array =
MakeArray<int32_t>({{7, 8, 9}, {10, 11, 12}});
tensorstore::TransformedSharedArray<const void> source_array =
base_source_array;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto chunk_transform,
tensorstore::IdentityTransform(source_array.domain()) |
tensorstore::Dims(1).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({0, 1, 2})));
EXPECT_THAT(TestWriteArrayError(WriteArraySourceCapabilities::kMutable,
tensorstore::Box<>({2, 3}), chunk_transform,
source_array),
tensorstore::MatchesStatus(absl::StatusCode::kCancelled));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/async_write_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/async_write_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
aa7a940d-b8e5-4e31-bdf3-0fefd4811866 | cpp | google/tensorstore | utf8 | tensorstore/internal/utf8.cc | tensorstore/internal/utf8_test.cc | #include "tensorstore/internal/utf8.h"
#include <cstdint>
#include <string_view>
namespace tensorstore {
namespace internal {
namespace {
namespace utf8_decode {
using State = uint32_t;
constexpr State kAccept = 0;
#if 0  // kReject is currently unused; kept to document the DFA's error state.
constexpr State kReject = 1;
#endif
// Lookup tables for Bjoern Hoehrmann's DFA-based UTF-8 decoder ("Flexible
// and Economical UTF-8 Decoder"). The first 256 entries map each input byte
// to a character class; the remaining 144 entries (9 states x 16 classes)
// form the transition table indexed as utf8d[256 + state * 16 + type].
const uint8_t utf8d[400] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
0xa,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x4,0x3,0x3,
0xb,0x6,0x6,0x6,0x5,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,
0x0,0x1,0x2,0x3,0x5,0x8,0x7,0x1,0x1,0x1,0x4,0x6,0x1,0x1,0x1,0x1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1,
1,2,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,
1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,3,1,3,1,1,1,1,1,1,
1,3,1,1,1,1,1,3,1,3,1,1,1,1,1,1,1,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
};
inline State Decode(State* state, char32_t* codep, uint8_t byte) {
uint32_t type = utf8d[byte];
*codep = (*state != kAccept) ? (byte & 0x3fu) | (*codep << 6)
: (0xff >> type) & (byte);
*state = utf8d[256 + *state * 16 + type];
return *state;
}
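// Worked example, stepping Decode() through the two-byte sequence C2 80
// (U+0080) using the tables above:
//   byte 0xC2: type = utf8d[0xC2] = 2; codep = (0xff >> 2) & 0xC2 = 0x02;
//              state: kAccept -> utf8d[256 + 0*16 + 2] = 2
//   byte 0x80: type = utf8d[0x80] = 1; codep = (0x80 & 0x3f) | (0x02 << 6)
//              = 0x80; state: 2 -> utf8d[256 + 2*16 + 1] = kAccept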
}  // namespace utf8_decode
}  // namespace
bool IsValidUtf8(std::string_view code_units) {
using utf8_decode::kAccept;
utf8_decode::State state = utf8_decode::kAccept;
char32_t codep;
for (const char x : code_units) {
utf8_decode::Decode(&state, &codep, x);
}
return state == kAccept;
}
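// Note that a truncated sequence never returns the DFA to kAccept: the lone
// lead byte 0xC2 leaves it in state 2, so IsValidUtf8("\xc2") is false while
// IsValidUtf8("\xc2\x80") is true (see the worked example above and the unit
// tests below).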
}  // namespace internal
}  // namespace tensorstore
#include <string_view>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::IsValidUtf8;
TEST(IsValidUtf8Test, Empty) {
EXPECT_TRUE(IsValidUtf8(""));
}
TEST(IsValidUtf8Test, Ascii) {
EXPECT_TRUE(IsValidUtf8("ascii"));
EXPECT_TRUE(IsValidUtf8(std::string_view("\0", 1)));
}
TEST(IsValidUtf8Test, TwoByte) {
EXPECT_TRUE(IsValidUtf8("\xc2\x80"));
EXPECT_TRUE(IsValidUtf8("\xc2\x80hello\xc2\xbf"));
}
TEST(IsValidUtf8Test, ThreeByte) {
EXPECT_TRUE(IsValidUtf8("\xe0\xa0\x80"));
}
TEST(IsValidUtf8Test, FourByte) {
EXPECT_TRUE(IsValidUtf8("\xf0\x90\x80\x80"));
}
TEST(IsValidUtf8Test, Surrogate) {
EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80"));
EXPECT_FALSE(IsValidUtf8("\xed\xb0\x80"));
EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80\xed\xb0\x80"));
}
TEST(IsValidUtf8Test, IllFormedFirstByte) {
EXPECT_FALSE(IsValidUtf8("\x80"));
EXPECT_FALSE(IsValidUtf8("\xC1"));
EXPECT_FALSE(IsValidUtf8("\xF5"));
EXPECT_FALSE(IsValidUtf8("\xFF"));
}
TEST(IsValidUtf8Test, OverlongNul) {
EXPECT_FALSE(IsValidUtf8("\xc0\x80"));
EXPECT_FALSE(IsValidUtf8("\xe0\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xf0\x80\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xf8\x80\x80\x80\x80"));
EXPECT_FALSE(IsValidUtf8("\xfc\x80\x80\x80\x80\x80"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/utf8.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/utf8_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c18dc50b-44db-4d22-ad51-b6e9d1082d44 | cpp | google/tensorstore | grid_partition | tensorstore/internal/grid_partition.cc | tensorstore/internal/grid_partition_test.cc | #include "tensorstore/internal/grid_partition.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal::OutputToGridCellFn;
using ::tensorstore::internal_index_space::TransformAccess;
namespace tensorstore {
namespace internal_grid_partition {
namespace {
using IndexArraySet = IndexTransformGridPartition::IndexArraySet;
using StridedSet = IndexTransformGridPartition::StridedSet;
struct ConnectedSetIterateParameters {
const IndexTransformGridPartition& info;
tensorstore::span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(
tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func;
};
void InitializeConstantGridCellIndices(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
tensorstore::span<Index> grid_cell_indices) {
for (DimensionIndex grid_dim = 0; grid_dim < grid_output_dimensions.size();
++grid_dim) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
const OutputIndexMapRef<> map = transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::constant) continue;
grid_cell_indices[grid_dim] =
output_to_grid_cell(grid_dim, map.offset(), nullptr);
}
}
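// A minimal sketch of an OutputToGridCellFn for a regular grid, assuming a
// hypothetical cell_shape[] giving the cell extent per grid dimension (real
// callers supply their own implementations):
//
//   Index RegularCell(DimensionIndex grid_dim, Index output_index,
//                     IndexInterval* cell_bounds) {
//     const Index size = cell_shape[grid_dim];
//     // Floor division toward negative infinity.
//     const Index cell = output_index >= 0
//                            ? output_index / size
//                            : (output_index - size + 1) / size;
//     if (cell_bounds) {
//       *cell_bounds = IndexInterval::UncheckedSized(cell * size, size);
//     }
//     return cell;
//   }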
class StridedSetGridCellIterator {
public:
explicit StridedSetGridCellIterator(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, StridedSet strided_set)
: transform_(transform),
grid_output_dimensions_(grid_output_dimensions),
output_to_grid_cell_(output_to_grid_cell),
strided_set_(strided_set) {
Reset();
}
void Reset() {
const IndexInterval domain =
transform_.input_domain()[strided_set_.input_dimension];
input_end_index_ = domain.exclusive_max();
input_index_ = domain.inclusive_min();
}
bool AtEnd() const { return input_index_ == input_end_index_; }
IndexInterval Next(tensorstore::span<Index> output_grid_cell_indices) {
assert(!AtEnd());
IndexInterval restricted_domain =
IndexInterval::UncheckedHalfOpen(input_index_, input_end_index_);
for (const DimensionIndex grid_dim :
strided_set_.grid_dimensions.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions_[grid_dim];
const OutputIndexMapRef<> map = transform_.output_index_map(output_dim);
IndexInterval cell_range;
output_grid_cell_indices[grid_dim] = output_to_grid_cell_(
grid_dim, input_index_ * map.stride() + map.offset(), &cell_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(cell_range, map.offset(), map.stride())
.value();
restricted_domain = Intersect(restricted_domain, cell_domain);
}
assert(!restricted_domain.empty());
input_index_ = restricted_domain.exclusive_max();
return restricted_domain;
}
private:
IndexTransformView<> transform_;
tensorstore::span<const DimensionIndex> grid_output_dimensions_;
OutputToGridCellFn output_to_grid_cell_;
StridedSet strided_set_;
Index input_end_index_;
Index input_index_;
};
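// Worked example of the iteration above, assuming a regular grid with cell
// size 4 on a single grid dimension, an identity output map (offset 0,
// stride 1), and input domain [0, 10): successive Next() calls yield the
// restricted domains [0, 4), [4, 8), [8, 10) with grid cell indices 0, 1, 2,
// after which AtEnd() is true. Each restricted domain is the intersection of
// the remaining input domain with the preimage of the current grid cell.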
class IndexArraySetIterator {
public:
IndexArraySetIterator(const IndexArraySet& index_array_set)
: grid_dimensions_(index_array_set.grid_dimensions),
grid_cell_indices_(index_array_set.grid_cell_indices),
partition_end_index_(index_array_set.num_partitions()),
partition_index_(0) {}
void Reset() { partition_index_ = 0; }
bool AtEnd() const { return partition_index_ == partition_end_index_; }
Index Next(tensorstore::span<Index> output_grid_cell_indices) {
assert(!AtEnd());
const Index grid_cell_indices_offset =
partition_index_ * grid_dimensions_.count();
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions_.index_view()) {
output_grid_cell_indices[grid_dim] =
grid_cell_indices_[grid_cell_indices_offset + grid_i++];
}
return partition_index_++;
}
private:
DimensionSet grid_dimensions_;
tensorstore::span<const Index> grid_cell_indices_;
Index partition_end_index_;
Index partition_index_;
};
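// Storage sketch for the iterator above: grid_cell_indices_ holds one
// flattened row of cell coordinates per partition. With two grid dimensions
// and three partitions it might hold {0,0, 0,1, 1,1}; Next() at
// partition_index_ == 1 copies 0 and 1 into the corresponding slots of
// output_grid_cell_indices and returns partition index 1.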
class ConnectedSetIterateHelper {
public:
explicit ConnectedSetIterateHelper(ConnectedSetIterateParameters params)
: params_(std::move(params)),
grid_cell_indices_(params_.grid_output_dimensions.size()),
cell_transform_(internal_grid_partition::InitializeCellTransform(
params_.info, params_.transform)) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, grid_cell_indices_);
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.info.index_array_sets().size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
params_.info.index_array_sets()[set_i];
IndexArraySetIterator iterator(index_array_set);
while (!iterator.AtEnd()) {
Index partition_i = iterator.Next(grid_cell_indices_);
UpdateCellTransformForIndexArraySetPartition(
index_array_set, set_i, partition_i, cell_transform_.get());
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.info.strided_sets().size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, params_.info.strided_sets()[set_i]);
const DimensionIndex cell_input_dim =
set_i + params_.info.index_array_sets().size();
while (!iterator.AtEnd()) {
auto restricted_domain = iterator.Next(grid_cell_indices_);
cell_transform_->input_origin()[cell_input_dim] =
restricted_domain.inclusive_min();
cell_transform_->input_shape()[cell_input_dim] = restricted_domain.size();
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
internal_index_space::DebugCheckInvariants(cell_transform_.get());
auto status = params_.func(
grid_cell_indices_,
TransformAccess::Make<IndexTransformView<>>(cell_transform_.get()));
cell_transform_ = MutableRep(std::move(cell_transform_));
return status;
}
ConnectedSetIterateParameters params_;
absl::FixedArray<Index, internal::kNumInlinedDims> grid_cell_indices_;
internal_index_space::TransformRep::Ptr<> cell_transform_;
};
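// Invokes `callback` with ranges of grid cells covered by a
// single_input_dimension output map.  For unit-magnitude stride a single
// closed interval suffices; otherwise the input domain is scanned cell by
// cell and adjacent cells are merged into intervals.  Returns `false` as soon
// as `callback` returns `false`.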
bool GetStridedGridCellRanges(
IndexTransformView<> transform, OutputToGridCellFn output_to_grid_cell,
DimensionIndex grid_dim, DimensionIndex output_dim,
absl::FunctionRef<bool(IndexInterval grid_cell_range)> callback) {
const auto output_map = transform.output_index_maps()[output_dim];
assert(output_map.method() == OutputIndexMethod::single_input_dimension);
const Index output_offset = output_map.offset();
const Index output_stride = output_map.stride();
const DimensionIndex input_dim = output_map.input_dimension();
const IndexInterval input_domain = transform.domain().box()[input_dim];
  if (output_stride == 1 || output_stride == -1) {
auto output_range = tensorstore::GetAffineTransformRange(
input_domain, output_offset, output_stride)
.value();
Index min_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_min(), nullptr);
Index max_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_max(), nullptr);
return callback(
IndexInterval::UncheckedClosed(min_cell_index, max_cell_index));
}
IndexInterval prev_interval;
for (Index input_index = input_domain.inclusive_min();
input_index < input_domain.exclusive_max();) {
IndexInterval output_range;
Index grid_cell = output_to_grid_cell(
grid_dim, input_index * output_stride + output_offset, &output_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(output_range, output_offset, output_stride)
.value();
assert(!cell_domain.empty());
if (grid_cell == prev_interval.exclusive_min() ||
grid_cell == prev_interval.exclusive_max()) {
prev_interval = IndexInterval::UncheckedClosed(
std::min(prev_interval.inclusive_min(), grid_cell),
std::max(prev_interval.inclusive_max(), grid_cell));
} else {
if (IsFinite(prev_interval)) {
if (!callback(prev_interval)) return false;
}
prev_interval = IndexInterval::UncheckedClosed(grid_cell, grid_cell);
}
input_index = cell_domain.exclusive_max();
}
return callback(prev_interval);
}
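// Parameters for GetGridCellRangesIterateHelper below.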
struct GetGridCellRangesIterateParameters {
const IndexTransformGridPartition& info;
tensorstore::span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(BoxView<> bounds)> func;
DimensionIndex outer_prefix_rank;
BoxView<> grid_bounds;
tensorstore::span<const IndexInterval> inner_intervals;
tensorstore::span<const StridedSet*> strided_sets_in_prefix;
tensorstore::span<const IndexArraySet*> index_array_sets_in_prefix;
};
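// Iterates over the "outer prefix" grid dimensions (those that cannot be
// covered by simple ranges) and invokes `params_.func` with a bounding box
// for each combination of outer-prefix cell indices and inner interval.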
class GetGridCellRangesIterateHelper {
public:
explicit GetGridCellRangesIterateHelper(
GetGridCellRangesIterateParameters params)
: params_(params) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell,
tensorstore::span<Index>(&grid_bounds_origin_[0],
params_.transform.output_rank()));
for (DimensionIndex i = 0; i < params.outer_prefix_rank; ++i) {
grid_bounds_shape_[i] = 1;
}
for (DimensionIndex i = params.outer_prefix_rank + 1,
rank = params.grid_bounds.rank();
i < rank; ++i) {
grid_bounds_origin_[i] = params.grid_bounds.origin()[i];
grid_bounds_shape_[i] = params.grid_bounds.shape()[i];
}
if (params.inner_intervals.size() == 1) {
const auto& inner_interval = params.inner_intervals[0];
grid_bounds_origin_[params.outer_prefix_rank] =
inner_interval.inclusive_min();
grid_bounds_shape_[params.outer_prefix_rank] = inner_interval.size();
}
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
GetGridCellRangesIterateParameters params_;
Index grid_bounds_origin_[kMaxRank];
Index grid_bounds_shape_[kMaxRank];
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.index_array_sets_in_prefix.size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
*params_.index_array_sets_in_prefix[set_i];
const auto grid_dimensions = index_array_set.grid_dimensions;
const DimensionIndex num_grid_dimensions = grid_dimensions.count();
for (Index partition_i = 0,
num_partitions = index_array_set.num_partitions();
partition_i < num_partitions; ++partition_i) {
const Index grid_cell_indices_offset = partition_i * num_grid_dimensions;
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
grid_bounds_origin_[grid_dim] =
index_array_set
.grid_cell_indices[grid_cell_indices_offset + grid_i++];
}
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.strided_sets_in_prefix.size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, *params_.strided_sets_in_prefix[set_i]);
while (!iterator.AtEnd()) {
iterator.Next(grid_bounds_origin_);
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
MutableBoxView<> bounds(params_.grid_bounds.rank(), grid_bounds_origin_,
grid_bounds_shape_);
if (params_.inner_intervals.size() == 1) {
return params_.func(bounds);
}
DimensionIndex outer_prefix_rank = params_.outer_prefix_rank;
for (const auto& inner_interval : params_.inner_intervals) {
bounds[outer_prefix_rank] = inner_interval;
TENSORSTORE_RETURN_IF_ERROR(params_.func(bounds));
}
return absl::OkStatus();
}
};
}  // namespace
}  // namespace internal_grid_partition
namespace internal {
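// Partitions the input domain of `transform` over the grid, invoking `func`
// with the grid cell indices and the corresponding cell transform for each
// non-empty grid cell.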
absl::Status PartitionIndexTransformOverGrid(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, IndexTransformView<> transform,
absl::FunctionRef<
absl::Status(tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func) {
internal_grid_partition::IndexTransformGridPartition partition_info;
auto status = internal_grid_partition::PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, partition_info);
if (!status.ok()) return status;
return internal_grid_partition::ConnectedSetIterateHelper(
{partition_info,
grid_output_dimensions,
output_to_grid_cell,
transform,
std::move(func)})
.Iterate();
}
}  // namespace internal
namespace internal_grid_partition {
absl::Status GetGridCellRanges(
const IndexTransformGridPartition& grid_partition,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback) {
assert(grid_output_dimensions.size() == grid_bounds.rank());
if (transform.domain().box().is_empty()) {
return absl::OkStatus();
}
if (grid_output_dimensions.empty()) {
return callback({});
}
std::array<DimensionIndex, kMaxRank> dim_to_indexed_set;
dim_to_indexed_set.fill(-1);
DimensionSet one_to_one_grid_dims;
for (const auto& strided_set : grid_partition.strided_sets()) {
if (strided_set.grid_dimensions.count() != 1) {
continue;
}
const DimensionIndex grid_dim =
strided_set.grid_dimensions.index_view().front();
one_to_one_grid_dims[grid_dim] = true;
}
for (size_t i = 0; i < grid_partition.index_array_sets().size(); ++i) {
const auto& set = grid_partition.index_array_sets()[i];
if (set.grid_dimensions.count() != 1) {
continue;
}
const DimensionIndex grid_dim = set.grid_dimensions.index_view().front();
one_to_one_grid_dims[grid_dim] = true;
dim_to_indexed_set[grid_dim] = i;
}
absl::InlinedVector<IndexInterval, 1> inner_intervals;
DimensionSet grid_dimensions_outside_prefix;
DimensionIndex range_queryable_grid_dim = grid_output_dimensions.size() - 1;
for (; range_queryable_grid_dim >= 0; --range_queryable_grid_dim) {
const DimensionIndex grid_dim = range_queryable_grid_dim;
const IndexInterval grid_interval = grid_bounds[grid_dim];
if (grid_interval.size() == 1) {
inner_intervals.clear();
inner_intervals.push_back(grid_interval);
continue;
}
if (!one_to_one_grid_dims[grid_dim]) {
break;
}
grid_dimensions_outside_prefix[grid_dim] = true;
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
inner_intervals.clear();
DimensionIndex indexed_set_i = dim_to_indexed_set[grid_dim];
if (indexed_set_i == -1) {
internal_grid_partition::GetStridedGridCellRanges(
transform, output_to_grid_cell, grid_dim, output_dim,
[&](IndexInterval grid_cell_range) {
inner_intervals.push_back(grid_cell_range);
return true;
});
} else {
const auto& set = grid_partition.index_array_sets()[indexed_set_i];
const auto& grid_cell_indices = set.grid_cell_indices;
size_t i = 0;
while (i < grid_cell_indices.size()) {
size_t last_i = i;
while (last_i + 1 < grid_cell_indices.size() &&
grid_cell_indices[last_i] + 1 == grid_cell_indices[last_i + 1]) {
++last_i;
}
inner_intervals.push_back(IndexInterval::UncheckedClosed(
grid_cell_indices[i], grid_cell_indices[last_i]));
i = last_i + 1;
}
}
if (inner_intervals.size() == 1 &&
tensorstore::Contains(inner_intervals[0], grid_interval)) {
inner_intervals.clear();
inner_intervals.push_back(grid_interval);
continue;
}
--range_queryable_grid_dim;
break;
}
const StridedSet* strided_sets_in_prefix_storage[kMaxRank];
const IndexArraySet* index_array_sets_in_prefix_storage[kMaxRank];
const auto get_sets_in_prefix = [&](auto sets, auto* buffer) {
ptrdiff_t i = 0;
for (const auto& set : sets) {
if (grid_dimensions_outside_prefix[set.grid_dimensions.index_view()
.front()]) {
continue;
}
buffer[i++] = &set;
}
return tensorstore::span(buffer, i);
};
auto strided_sets_in_prefix = get_sets_in_prefix(
grid_partition.strided_sets(), strided_sets_in_prefix_storage);
auto index_array_sets_in_prefix = get_sets_in_prefix(
grid_partition.index_array_sets(), index_array_sets_in_prefix_storage);
if (range_queryable_grid_dim == grid_output_dimensions.size() - 1) {
inner_intervals.push_back(grid_bounds[range_queryable_grid_dim]);
}
internal_grid_partition::GetGridCellRangesIterateHelper iterate_helper(
internal_grid_partition::GetGridCellRangesIterateParameters{
grid_partition, grid_output_dimensions, output_to_grid_cell,
transform, callback, range_queryable_grid_dim + 1, grid_bounds,
inner_intervals, strided_sets_in_prefix, index_array_sets_in_prefix});
return iterate_helper.Iterate();
}
}  // namespace internal_grid_partition
namespace internal {
absl::Status GetGridCellRanges(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback) {
using internal_grid_partition::StridedSet;
assert(grid_output_dimensions.size() == grid_bounds.rank());
if (transform.domain().box().is_empty()) {
return absl::OkStatus();
}
if (grid_output_dimensions.empty()) {
return callback({});
}
internal_grid_partition::IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(
internal_grid_partition::PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell,
grid_partition));
return internal_grid_partition::GetGridCellRanges(
grid_partition, grid_output_dimensions, grid_bounds, output_to_grid_cell,
transform, callback);
}
}  // namespace internal
} | #include "tensorstore/internal/grid_partition.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/irregular_grid.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::Result;
using ::tensorstore::internal::GetGridCellRanges;
using ::tensorstore::internal::IrregularGrid;
using ::tensorstore::internal::OutputToGridCellFn;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
namespace partition_tests {
using R = std::pair<std::vector<Index>, IndexTransform<>>;
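// Partitions `transform` over a regular grid, additionally verifying that
// IndexTransformGridPartition::GetCellTransform reproduces each iterated
// cell transform.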
std::vector<R> GetPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const std::vector<Index>& grid_cell_shape, IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition info;
RegularGridRef grid{grid_cell_shape};
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, info));
TENSORSTORE_CHECK_OK(
tensorstore::internal::PartitionIndexTransformOverGrid(
grid_output_dimensions, grid, transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
auto cell_transform_direct = info.GetCellTransform(
transform, grid_cell_indices, grid_output_dimensions,
[&](DimensionIndex dim, Index cell_index) {
return grid.GetCellOutputInterval(dim, cell_index);
});
EXPECT_EQ(cell_transform_direct, cell_transform);
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantOneDimensional) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_constant(0, 3)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_single_input_dimension(0, 0)
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantTwoDimensional) {
const auto results = GetPartitions({0, 1}, {2, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_constant(0, 3)
.output_constant(1, 7)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, OneDimensionalUnitStride) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({5})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-2})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoDimensionalIdentity) {
const auto results = GetPartitions({0, 1}, {20, 10},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 20})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 0})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 10})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 20})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleStridedDimension) {
const auto results =
GetPartitions({0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, DiagonalStridedDimensions) {
const auto results =
GetPartitions({0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.output_single_input_dimension(1, 7, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimension) {
const auto results =
GetPartitions({0}, {3},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({8})
.output_index_array(
0, 0, 1, MakeArray<Index>({1, 2, 3, 4, 5, 6, 7, 8}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 101}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({102, 103, 104}))
.Finalize()
.value()},
R{{2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({105, 106, 107}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimensionStrided) {
const auto results = GetPartitions(
{0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({101, 102}))
.Finalize()
.value()},
R{{3},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoIndexArrayDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.output_index_array(1, 4, -2, MakeArray<Index>({5, 1, 7, -3, -2, 5}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1, -2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({102}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({101}))
.Finalize()
.value()},
R{{3, -1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, IndexArrayAndStridedDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}));
}
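// Partitions `transform` over an irregular grid, returning the grid cell
// indices and cell transform for each partition.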
std::vector<R> GetIrregularPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const IrregularGrid& grid, IndexTransformView<> transform) {
std::vector<R> results;
TENSORSTORE_CHECK_OK(tensorstore::internal::PartitionIndexTransformOverGrid(
grid_output_dimensions, grid, transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverIrregularGrid, TwoDimensionalIdentity) {
const std::vector<DimensionIndex> grid_output_dimensions{0, 1};
std::vector<Index> dimension0{15};
std::vector<Index> dimension1{-10, 10, 100};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results =
GetIrregularPartitions(grid_output_dimensions, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()}
));
}
TEST(PartitionIndexTransformOverIrregularGrid, IndexArrayAndStridedDimensions) {
std::vector<Index> dimension0{10, 15, 20, 30, 50};
std::vector<Index> dimension1{0, 1, 5, 10, 13};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results = GetIrregularPartitions(
{0, 1}, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(R{{0, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{1, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{3, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}
));
}
}  // namespace partition_tests
namespace get_grid_cell_ranges_tests {
using R = Box<>;
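// Collects the grid cell range boxes produced by GetGridCellRanges.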
Result<std::vector<R>> GetRanges(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, grid_partition));
TENSORSTORE_RETURN_IF_ERROR(GetGridCellRanges(
grid_output_dimensions, grid_bounds, output_to_grid_cell, transform,
[&](BoxView<> bounds) -> absl::Status {
results.emplace_back(bounds);
return absl::OkStatus();
}));
return results;
}
TEST(GetGridCellRangesTest, Rank0) {
EXPECT_THAT(GetRanges({}, {},
RegularGridRef{{}},
IndexTransformBuilder(0, 0).Finalize().value()),
::testing::Optional(ElementsAre(R{})));
}
TEST(GetGridCellRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R{{0}, {10}})));
}
TEST(GetGridCellRangesTest, Rank1Constrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R({1}, {7}))));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 10}}},
IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 4}},
R{{2, 0}, {1, 4}}
)));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedFirstDimOnly) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({6, 0})
.input_shape({8, 50})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R{{1, 0}, {2, 10}})));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedSecondDimOnly) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({25, 30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{0, 1}, {1, 7}},
R{{1, 1}, {1, 7}},
R{{2, 1}, {1, 7}},
R{{3, 1}, {1, 7}},
R{{4, 1}, {1, 7}}
)));
}
TEST(GetGridCellRangesTest, Rank2IndexArrayFirstDimUnconstrainedSecondDim) {
EXPECT_THAT(
GetRanges(
{{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 0})
.input_shape({3, 50})
.output_index_array(0, 0, 1, MakeArray<Index>({{6}, {15}, {20}}))
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 10}},
R{{3, 0}, {2, 10}}
)));
}
TEST(GetGridCellRangesTest, Rank2IndexArrayFirstDimConstrainedSecondDim) {
EXPECT_THAT(
GetRanges(
{{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({3, 30})
.output_index_array(0, 0, 1, MakeArray<Index>({{6}, {15}, {20}}))
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 1}, {1, 7}},
R{{3, 1}, {1, 7}},
R{{4, 1}, {1, 7}}
)));
}
TEST(GetGridCellRangesTest, Rank2Diagonal) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 10}}},
IndexTransformBuilder(1, 2)
.input_origin({6})
.input_shape({8})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 1}},
R{{2, 1}, {1, 1}}
)));
}
}  // namespace get_grid_cell_ranges_tests
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4b005f56-f935-491f-983f-62ac8cec25dd | cpp | google/tensorstore | grid_partition_impl | tensorstore/internal/grid_partition_impl.cc | tensorstore/internal/grid_partition_impl_test.cc | #include "tensorstore/internal/grid_partition_impl.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_grid_partition {
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::TransformRep;
using IndexArraySet = IndexTransformGridPartition::IndexArraySet;
using StridedSet = IndexTransformGridPartition::StridedSet;
using OutputToGridCellFn = absl::FunctionRef<Index(
DimensionIndex grid_dim, Index output_index, IndexInterval* cell_bounds)>;
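// Returns the input index vectors for the specified partition as a view of
// `partitioned_input_indices`; the row range is determined by
// `grid_cell_partition_offsets`.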
SharedArray<const Index, 2>
IndexTransformGridPartition::IndexArraySet::partition_input_indices(
Index partition_i) const {
assert(partition_i >= 0 && partition_i < num_partitions());
SharedArray<const Index, 2> result;
const Index start = grid_cell_partition_offsets[partition_i];
const Index end =
static_cast<size_t>(partition_i + 1) == grid_cell_partition_offsets.size()
? partitioned_input_indices.shape()[0]
: grid_cell_partition_offsets[partition_i + 1];
assert(start >= 0 && start < partitioned_input_indices.shape()[0]);
assert(end > start && end <= partitioned_input_indices.shape()[0]);
result.pointer() =
std::shared_ptr<const Index>(partitioned_input_indices.pointer(),
&partitioned_input_indices(start, 0));
result.layout() = partitioned_input_indices.layout();
result.shape()[0] = end - start;
return result;
}
tensorstore::span<const Index>
IndexTransformGridPartition::IndexArraySet::partition_grid_cell_indices(
Index partition_i) const {
assert(partition_i >= 0 && partition_i < num_partitions());
assert(grid_cell_indices.size() ==
static_cast<size_t>(num_partitions() * grid_dimensions.count()));
return tensorstore::span(
&grid_cell_indices[partition_i * grid_dimensions.count()],
grid_dimensions.count());
}
namespace {
struct GridCellIndicesIndirectPartialCompare {
DimensionSet grid_dimensions;
const Index* grid_cell_indices_for_partitions;
Index operator()(Index partition_i, const Index* full_indices) const {
const Index* other_grid_cell_indices =
grid_cell_indices_for_partitions +
partition_i * grid_dimensions.count();
DimensionIndex j = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
Index diff = other_grid_cell_indices[j] - full_indices[grid_dim];
if (diff != 0) {
return diff;
}
++j;
}
return 0;
}
};
}  // namespace
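// Binary searches the sorted `grid_cell_indices` table for the partition
// whose grid cell index vector matches `grid_cell_indices`; returns -1 if
// there is no such partition.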
Index IndexTransformGridPartition::IndexArraySet::FindPartition(
tensorstore::span<const Index> grid_cell_indices) const {
Index lower = 0, upper = num_partitions();
GridCellIndicesIndirectPartialCompare compare{grid_dimensions,
this->grid_cell_indices.data()};
while (lower != upper) {
Index mid = (lower + upper) / 2;
Index c = compare(mid, grid_cell_indices.data());
if (c == 0) return mid;
if (c > 0) {
upper = mid;
} else {
lower = mid + 1;
}
}
return -1;
}
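// Points the index array output maps of `cell_transform` at the input index
// vectors of the specified partition of `index_array_set`, and updates the
// extent of the corresponding synthetic input dimension `set_i`.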
void UpdateCellTransformForIndexArraySetPartition(
const IndexArraySet& index_array_set, DimensionIndex set_i,
Index partition_i, internal_index_space::TransformRep* cell_transform) {
const SharedArray<const Index, 2> partition_input_indices =
index_array_set.partition_input_indices(partition_i);
cell_transform->input_shape()[set_i] = partition_input_indices.shape()[0];
ByteStridedPointer<const Index> partition_input_indices_ptr =
partition_input_indices.byte_strided_pointer();
const Index vector_dimension_byte_stride =
partition_input_indices.byte_strides()[1];
const tensorstore::span<OutputIndexMap> output_maps =
cell_transform->output_index_maps();
for (DimensionIndex full_input_dim :
index_array_set.input_dimensions.index_view()) {
internal_index_space::IndexArrayData& index_array_data =
output_maps[full_input_dim].index_array_data();
index_array_data.element_pointer = std::shared_ptr<const Index>(
partition_input_indices.pointer(), partition_input_indices_ptr);
partition_input_indices_ptr += vector_dimension_byte_stride;
}
}
IndexTransform<> IndexTransformGridPartition::GetCellTransform(
IndexTransformView<> full_transform,
tensorstore::span<const Index> grid_cell_indices,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
absl::FunctionRef<IndexInterval(DimensionIndex grid_dim,
Index grid_cell_index)>
get_grid_cell_output_interval) const {
auto cell_transform = InitializeCellTransform(*this, full_transform);
for (DimensionIndex set_i = 0, num_sets = index_array_sets().size();
set_i < num_sets; ++set_i) {
const IndexArraySet& index_array_set = index_array_sets()[set_i];
const Index partition_i = index_array_set.FindPartition(grid_cell_indices);
assert(partition_i != -1);
UpdateCellTransformForIndexArraySetPartition(
index_array_set, set_i, partition_i, cell_transform.get());
}
for (DimensionIndex set_i = 0, num_sets = strided_sets().size();
set_i < num_sets; ++set_i) {
const StridedSet& strided_set = strided_sets()[set_i];
const DimensionIndex cell_input_dim = set_i + index_array_sets().size();
IndexInterval restricted_domain =
full_transform.input_domain()[strided_set.input_dimension];
for (const DimensionIndex grid_dim :
strided_set.grid_dimensions.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
IndexInterval cell_range =
get_grid_cell_output_interval(grid_dim, grid_cell_indices[grid_dim]);
const OutputIndexMapRef<> map =
full_transform.output_index_map(output_dim);
const IndexInterval cell_domain =
GetAffineTransformDomain(cell_range, map.offset(), map.stride())
.value();
restricted_domain = Intersect(restricted_domain, cell_domain);
}
assert(!restricted_domain.empty());
cell_transform->input_origin()[cell_input_dim] =
restricted_domain.inclusive_min();
cell_transform->input_shape()[cell_input_dim] = restricted_domain.size();
}
return internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(cell_transform));
}
namespace {
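// Invokes `set_callback` for each connected set of input dimensions, along
// with the grid dimensions they affect and whether the set involves an index
// array output map.  Two grid dimensions belong to the same set if they
// depend on a common input dimension.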
template <typename SetCallbackFn>
void ForEachConnectedSet(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
IndexTransformView<> transform, SetCallbackFn set_callback) {
DimensionSet input_dims_for_grid_dims[kMaxRank];
DimensionSet grid_dims_with_array_dependence;
for (DimensionIndex grid_dim = 0; grid_dim < grid_output_dimensions.size();
++grid_dim) {
auto [input_dims, array_dependence] =
internal::GetInputDimensionsForOutputDimension(
transform, grid_output_dimensions[grid_dim]);
input_dims_for_grid_dims[grid_dim] = input_dims;
grid_dims_with_array_dependence[grid_dim] = array_dependence;
}
DimensionSet current_input_dims, current_grid_dims;
DimensionSet remaining_grid_dims{
DimensionSet::UpTo(grid_output_dimensions.size())};
bool current_set_has_array;
const auto add_grid_dim_to_current_set =
[&](DimensionIndex grid_dim) -> DimensionSet {
assert(remaining_grid_dims.test(grid_dim));
assert(grid_dim >= 0 && grid_dim < grid_output_dimensions.size());
remaining_grid_dims.reset(grid_dim);
current_grid_dims.set(grid_dim);
auto input_dims = input_dims_for_grid_dims[grid_dim];
current_set_has_array |= grid_dims_with_array_dependence[grid_dim];
current_input_dims |= input_dims;
return input_dims;
};
const auto is_grid_dim_in_set =
[&](DimensionIndex grid_dim) -> DimensionIndex {
assert(remaining_grid_dims.test(grid_dim));
assert(grid_dim >= 0 && grid_dim < grid_output_dimensions.size());
return !(input_dims_for_grid_dims[grid_dim] & current_input_dims).none();
};
while (!remaining_grid_dims.none()) {
current_input_dims = {};
current_grid_dims = {};
current_set_has_array = false;
if (add_grid_dim_to_current_set(remaining_grid_dims.index_view().front())
.none()) {
continue;
}
for (DimensionIndex grid_dim : remaining_grid_dims.index_view()) {
if (is_grid_dim_in_set(grid_dim)) {
add_grid_dim_to_current_set(grid_dim);
}
}
set_callback(current_input_dims, current_grid_dims, current_set_has_array);
}
}
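// Fills `output` with `outer_count` repetitions of the strided range
// starting at `start` with `size` elements and the given `stride`, with each
// element repeated `inner_count` times.  Returns the end of the output range.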
template <typename T, typename Stride, typename OutputIt, typename OutputStride>
OutputIt FillWithTiledStridedRange(T start, T size, Stride stride,
Index outer_count, Index inner_count,
OutputIt output,
OutputStride output_stride) {
const T end = start + size * stride;
for (Index outer_i = 0; outer_i < outer_count; ++outer_i) {
for (Index i = start; i != end; i += stride) {
for (Index inner_i = 0; inner_i < inner_count; ++inner_i) {
*output = i;
output += output_stride;
}
}
}
return output;
}
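// Writes the output index produced by a single_input_dimension output map for
// every position in the subdomain spanned by `input_dims`, in C order, to
// `output_indices` with the given stride.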
absl::Status GenerateSingleInputDimensionOutputIndices(
OutputIndexMapRef<> map, DimensionSet input_dims,
IndexTransformView<> index_transform, Index* output_indices,
Index output_stride) {
assert(map.method() == OutputIndexMethod::single_input_dimension);
const DimensionIndex single_input_dim = map.input_dimension();
const IndexInterval domain = index_transform.input_domain()[single_input_dim];
const Index stride = map.stride();
TENSORSTORE_RETURN_IF_ERROR(
GetAffineTransformRange(domain, map.offset(), stride));
const Index start = map.offset() + stride * domain.inclusive_min();
tensorstore::span<const Index> input_shape = index_transform.input_shape();
Index inner_count = 1;
Index outer_count = 1;
for (DimensionIndex input_dim : input_dims.index_view()) {
if (input_dim == single_input_dim) {
outer_count = inner_count;
inner_count = 1;
} else {
inner_count *= input_shape[input_dim];
}
}
FillWithTiledStridedRange(start, domain.size(), stride, outer_count,
inner_count, output_indices, output_stride);
return absl::OkStatus();
}
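// Writes the output index produced by an index array output map for every
// position in the subdomain spanned by `input_dims`, in C order, to
// `output_indices` with the given stride.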
absl::Status GenerateIndexArrayOutputIndices(
OutputIndexMapRef<> map, DimensionSet input_dims,
IndexTransformView<> index_transform, Index* output_indices,
Index output_stride) {
assert(map.method() == OutputIndexMethod::array);
const DimensionIndex input_rank = index_transform.input_rank();
Index output_byte_strides[kMaxRank];
std::fill_n(&output_byte_strides[0], input_rank, static_cast<Index>(0));
DimensionIndex byte_stride = sizeof(Index) * output_stride;
Index input_dims_copy[kMaxRank];
DimensionIndex num_input_dims = 0;
for (DimensionIndex input_dim : input_dims.index_view()) {
input_dims_copy[num_input_dims++] = input_dim;
}
for (DimensionIndex i = num_input_dims - 1; i >= 0; --i) {
const DimensionIndex input_dim = input_dims_copy[i];
output_byte_strides[input_dim] = byte_stride;
byte_stride *= index_transform.input_shape()[input_dim];
}
const OutputIndexMapRef<>::IndexArrayView index_array = map.index_array();
TENSORSTORE_RETURN_IF_ERROR(ValidateIndexArrayBounds(
index_array.index_range(), index_array.array_ref()));
const Index stride = map.stride();
const Index offset = map.offset();
IterateOverArrays(
[stride, offset](const Index* source_ptr, Index* output_ptr) {
const Index source_index = *source_ptr;
*output_ptr = source_index * stride + offset;
return true;
},
skip_repeated_elements,
map.index_array().array_ref(),
ArrayView<Index>(
output_indices,
StridedLayoutView<>(index_transform.input_shape(),
tensorstore::span<const Index>(
&output_byte_strides[0], input_rank))));
return absl::OkStatus();
}
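// Returns the product of the extents of `input_shape` over the dimensions in
// `dims`, or an error on overflow.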
Result<Index> ProductOfIndirectExtents(
tensorstore::span<const Index> input_shape, DimensionSet dims) {
Index num_positions = 1;
for (const DimensionIndex dim : dims.index_view()) {
if (internal::MulOverflow(num_positions, input_shape[dim],
&num_positions)) {
return absl::InvalidArgumentError(
"Overflow computing number of positions in domain.");
}
}
return num_positions;
}
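// Computes, for each position in the subdomain spanned by `input_dims`, the
// grid cell index vector over `grid_dims`, stored contiguously per position.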
Result<std::vector<Index>> GenerateIndexArraySetGridCellIndices(
DimensionSet grid_dims, DimensionSet input_dims,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> index_transform, Index num_positions) {
const DimensionIndex num_grid_dims = grid_dims.count();
std::vector<Index> temp_cell_indices(num_grid_dims * num_positions);
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dims.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
const OutputIndexMapRef<> map =
index_transform.output_index_map(output_dim);
Index* cur_cell_indices = temp_cell_indices.data() + grid_i;
if (map.method() == OutputIndexMethod::single_input_dimension) {
TENSORSTORE_RETURN_IF_ERROR(GenerateSingleInputDimensionOutputIndices(
map, input_dims, index_transform, cur_cell_indices, num_grid_dims));
} else {
assert(map.method() == OutputIndexMethod::array);
TENSORSTORE_RETURN_IF_ERROR(GenerateIndexArrayOutputIndices(
map, input_dims, index_transform, cur_cell_indices, num_grid_dims));
}
for (Index* end = cur_cell_indices + num_positions * num_grid_dims;
cur_cell_indices != end; cur_cell_indices += num_grid_dims) {
*cur_cell_indices =
output_to_grid_cell(grid_dim, *cur_cell_indices, nullptr);
}
++grid_i;
}
return temp_cell_indices;
}
struct IndirectIndicesEqual {
const Index* index_vectors;
DimensionIndex num_dims;
bool operator()(Index a, Index b) const {
return std::equal(index_vectors + a * num_dims,
index_vectors + a * num_dims + num_dims,
index_vectors + b * num_dims);
}
};
struct IndirectIndicesLess {
const Index* index_vectors;
DimensionIndex num_dims;
bool operator()(Index a, Index b) const {
return std::lexicographical_compare(
index_vectors + a * num_dims, index_vectors + a * num_dims + num_dims,
index_vectors + b * num_dims, index_vectors + b * num_dims + num_dims);
}
};
struct IndirectHashIndices {
const Index* index_vectors;
DimensionIndex num_dims;
size_t operator()(Index x) const {
return absl::Hash<HashHelper>()(HashHelper{index_vectors, num_dims, x});
}
private:
struct HashHelper {
const Index* index_vectors;
DimensionIndex num_dims;
Index index;
template <typename H>
friend H AbslHashValue(H h, HashHelper x) {
return H::combine_contiguous(
std::move(h), x.index_vectors + x.index * x.num_dims, x.num_dims);
}
};
};
using IndirectVectorMap = absl::flat_hash_map<Index, Index, IndirectHashIndices,
IndirectIndicesEqual>;
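// Groups the positions in `temp_cell_indices` by their grid cell index
// vectors, storing the distinct vectors in lexicographic order in
// `grid_cell_indices` and the start offset of each partition in
// `grid_cell_partition_offsets`.  The returned map gives each partition's
// running output offset, which is consumed by
// GenerateIndexArraySetPartitionedInputIndices.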
IndirectVectorMap PartitionIndexArraySetGridCellIndexVectors(
const Index* temp_cell_indices, Index num_positions, Index num_grid_dims,
std::vector<Index>* grid_cell_indices,
std::vector<Index>* grid_cell_partition_offsets) {
IndirectVectorMap cells(
1, IndirectHashIndices{temp_cell_indices, num_grid_dims},
IndirectIndicesEqual{temp_cell_indices, num_grid_dims});
for (DimensionIndex i = 0; i < num_positions; ++i) {
++cells[i];
}
grid_cell_indices->resize(num_grid_dims * cells.size());
grid_cell_partition_offsets->resize(cells.size());
std::transform(cells.begin(), cells.end(),
grid_cell_partition_offsets->begin(),
[](IndirectVectorMap::const_reference x) { return x.first; });
std::sort(grid_cell_partition_offsets->begin(),
grid_cell_partition_offsets->end(),
IndirectIndicesLess{temp_cell_indices, num_grid_dims});
{
Index offset = 0;
Index* grid_cell_indices_ptr = grid_cell_indices->data();
for (Index& position_i_or_offset : *grid_cell_partition_offsets) {
const Index position_i = position_i_or_offset;
auto it = cells.find(position_i);
assert(it != cells.end());
auto& count_or_offset = it->second;
const Index count = count_or_offset;
position_i_or_offset = count_or_offset = offset;
offset += count;
grid_cell_indices_ptr =
std::copy_n(temp_cell_indices + position_i * num_grid_dims,
num_grid_dims, grid_cell_indices_ptr);
}
}
return cells;
}
SharedArray<Index, 2> GenerateIndexArraySetPartitionedInputIndices(
DimensionSet input_dims, BoxView<> full_input_domain,
IndirectVectorMap cells, Index num_positions) {
const DimensionIndex num_input_dims = input_dims.count();
Box<dynamic_rank(internal::kNumInlinedDims)> partial_input_domain(
num_input_dims);
{
DimensionIndex i = 0;
for (DimensionIndex input_dim : input_dims.index_view()) {
partial_input_domain[i] = full_input_domain[input_dim];
++i;
}
}
SharedArray<Index, 2> partitioned_input_indices =
AllocateArray<Index>({num_positions, num_input_dims});
Index position_i = 0;
IterateOverIndexRange(
partial_input_domain, [&](tensorstore::span<const Index> indices) {
auto it = cells.find(position_i);
assert(it != cells.end());
auto& offset = it->second;
std::copy(indices.begin(), indices.end(),
partitioned_input_indices.data() + offset * num_input_dims);
++offset;
++position_i;
});
return partitioned_input_indices;
}
absl::Status FillIndexArraySetData(
IndexTransformGridPartition::IndexArraySet& index_array_set,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> index_transform) {
TENSORSTORE_ASSIGN_OR_RETURN(
Index num_positions,
ProductOfIndirectExtents(index_transform.input_shape(),
index_array_set.input_dimensions));
if (num_positions == 0) {
return absl::OkStatus();
}
TENSORSTORE_ASSIGN_OR_RETURN(
std::vector<Index> temp_cell_indices,
GenerateIndexArraySetGridCellIndices(
index_array_set.grid_dimensions, index_array_set.input_dimensions,
grid_output_dimensions, output_to_grid_cell, index_transform,
num_positions));
IndirectVectorMap cells = PartitionIndexArraySetGridCellIndexVectors(
temp_cell_indices.data(), num_positions,
index_array_set.grid_dimensions.count(),
&index_array_set.grid_cell_indices,
&index_array_set.grid_cell_partition_offsets);
index_array_set.partitioned_input_indices =
GenerateIndexArraySetPartitionedInputIndices(
index_array_set.input_dimensions, index_transform.domain().box(),
std::move(cells), num_positions);
return absl::OkStatus();
}
absl::Status GenerateIndexTransformGridPartitionData(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> index_transform,
IndexTransformGridPartition& grid_partition) {
IndexTransformGridPartition::StridedSet strided_sets[kMaxRank];
DimensionIndex num_strided_sets = 0;
std::pair<DimensionSet, DimensionSet> index_array_sets[kMaxRank];
DimensionIndex num_index_array_sets = 0;
ForEachConnectedSet(
grid_output_dimensions, index_transform,
[&](DimensionSet input_dims, DimensionSet grid_dims, bool has_array) {
if (!has_array) {
assert(input_dims.count() == 1);
strided_sets[num_strided_sets++] = {grid_dims,
input_dims.index_view().front()};
} else {
index_array_sets[num_index_array_sets++] = {grid_dims, input_dims};
}
});
grid_partition.strided_sets_.assign(&strided_sets[0],
&strided_sets[num_strided_sets]);
grid_partition.index_array_sets_.resize(num_index_array_sets);
for (DimensionIndex i = 0; i < num_index_array_sets; ++i) {
auto& set = grid_partition.index_array_sets_[i];
auto [grid_dims, input_dims] = index_array_sets[i];
set.input_dimensions = input_dims;
set.grid_dimensions = grid_dims;
TENSORSTORE_RETURN_IF_ERROR(FillIndexArraySetData(
set, grid_output_dimensions, output_to_grid_cell, index_transform));
}
return absl::OkStatus();
}
}  // namespace
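// Allocates and initializes the parts of the cell transform that do not
// depend on the grid cell: one synthetic input dimension per index array set,
// one per strided set, and passthrough maps for each remaining input
// dimension that does not belong to any connected set.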
internal_index_space::TransformRep::Ptr<> InitializeCellTransform(
const IndexTransformGridPartition& info,
IndexTransformView<> full_transform) {
const DimensionIndex full_input_rank = full_transform.input_rank();
DimensionIndex num_index_array_dims = 0;
for (const IndexArraySet& index_array_set : info.index_array_sets()) {
num_index_array_dims += index_array_set.input_dimensions.count();
}
const DimensionIndex cell_input_rank =
full_input_rank - num_index_array_dims + info.index_array_sets().size();
internal_index_space::TransformRep::Ptr<> cell_transform =
TransformRep::Allocate(cell_input_rank, full_input_rank);
cell_transform->input_rank = cell_input_rank;
cell_transform->output_rank = full_input_rank;
cell_transform->implicit_lower_bounds = false;
cell_transform->implicit_upper_bounds = false;
const tensorstore::span<Index> input_origin =
cell_transform->input_origin().first(cell_input_rank);
const tensorstore::span<OutputIndexMap> output_maps =
cell_transform->output_index_maps().first(full_input_rank);
{
DimensionIndex cell_input_dim = 0;
for (const IndexArraySet& index_array_set : info.index_array_sets()) {
input_origin[cell_input_dim] = 0;
for (const DimensionIndex full_input_dim :
index_array_set.input_dimensions.index_view()) {
auto& map = output_maps[full_input_dim];
map.offset() = 0;
map.stride() = 1;
auto& index_array_data = map.SetArrayIndexing(cell_input_rank);
std::fill_n(index_array_data.byte_strides, cell_input_rank, 0);
index_array_data.byte_strides[cell_input_dim] =
index_array_set.partitioned_input_indices.byte_strides()[0];
}
++cell_input_dim;
}
for (const auto& strided_set : info.strided_sets()) {
auto& map = output_maps[strided_set.input_dimension];
map.SetSingleInputDimension(cell_input_dim);
map.offset() = 0;
map.stride() = 1;
++cell_input_dim;
}
}
for (DimensionIndex cell_input_dim = info.index_array_sets().size() +
info.strided_sets().size(),
full_input_dim = 0;
full_input_dim < full_input_rank; ++full_input_dim) {
auto& map = output_maps[full_input_dim];
if (map.method() != OutputIndexMethod::constant) continue;
map.SetSingleInputDimension(cell_input_dim);
map.offset() = 0;
map.stride() = 1;
cell_transform->input_dimension(cell_input_dim) =
internal_index_space::TransformAccess::rep(full_transform)
->input_dimension(full_input_dim);
++cell_input_dim;
}
return cell_transform;
}
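// Validates that the input domain is bounded and that the ranges of the
// single_input_dimension output maps can be computed without overflow, then
// precomputes the partitioning data in `grid_partition`.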
absl::Status PrePartitionIndexTransformOverGrid(
IndexTransformView<> index_transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
IndexTransformGridPartition& grid_partition) {
const DimensionIndex input_rank = index_transform.input_rank();
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
const IndexInterval domain = index_transform.input_domain()[input_dim];
if (!IsFinite(domain)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Input dimension ", input_dim,
" has unbounded domain ", domain, "."));
}
}
for (const DimensionIndex output_dim : grid_output_dimensions) {
const OutputIndexMapRef<> map =
index_transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
auto status = GetAffineTransformRange(
index_transform.input_domain()[map.input_dimension()],
map.offset(), map.stride())
.status();
if (!status.ok()) {
return MaybeAnnotateStatus(
status, tensorstore::StrCat("Computing range of output dimension ",
output_dim));
}
}
return internal_grid_partition::GenerateIndexTransformGridPartitionData(
grid_output_dimensions, output_to_grid_cell, index_transform,
grid_partition);
}
}  // namespace internal_grid_partition
} | #include "tensorstore/internal/grid_partition_impl.h"
#include <ostream>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/irregular_grid.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_grid_partition {
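// Streaming and comparison operators for StridedSet and IndexArraySet,
// defined here so that the matchers in the tests below can compare and print
// them.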
std::ostream& operator<<(std::ostream& os,
const IndexTransformGridPartition::StridedSet& s) {
return os << "{grid_dimensions=" << s.grid_dimensions
<< ", input_dimension=" << s.input_dimension << "}";
}
bool operator==(const IndexTransformGridPartition::StridedSet& a,
const IndexTransformGridPartition::StridedSet& b) {
return a.input_dimension == b.input_dimension &&
a.grid_dimensions == b.grid_dimensions;
}
bool operator!=(const IndexTransformGridPartition::StridedSet& a,
const IndexTransformGridPartition::StridedSet& b) {
return !(a == b);
}
std::ostream& operator<<(std::ostream& os,
const IndexTransformGridPartition::IndexArraySet& s) {
return os << "IndexArraySet where:\n"
<< " grid_dimensions=" << s.grid_dimensions << "\n"
<< " input_dimensions=" << s.input_dimensions << "\n"
<< " grid_cell_indices="
<< Array(s.grid_cell_indices.data(),
{s.num_partitions(),
static_cast<Index>(s.grid_dimensions.count())})
<< "\n"
<< " partitioned_input_indices=" << s.partitioned_input_indices
<< "\n"
<< " grid_cell_partition_offsets="
<< tensorstore::span(s.grid_cell_partition_offsets) << "\n";
}
bool operator==(const IndexTransformGridPartition::IndexArraySet& a,
const IndexTransformGridPartition::IndexArraySet& b) {
return a.input_dimensions == b.input_dimensions &&
a.grid_dimensions == b.grid_dimensions &&
a.grid_cell_indices == b.grid_cell_indices &&
a.partitioned_input_indices == b.partitioned_input_indices &&
a.grid_cell_partition_offsets == b.grid_cell_partition_offsets;
}
bool operator!=(const IndexTransformGridPartition::IndexArraySet& a,
const IndexTransformGridPartition::IndexArraySet& b) {
return !(a == b);
}
}  // namespace internal_grid_partition
}  // namespace tensorstore
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::IrregularGrid;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
TEST(RegularGridTest, Basic) {
std::vector<Index> grid_cell_shape{1, 2, 3};
RegularGridRef grid{grid_cell_shape};
EXPECT_EQ(3, grid.rank());
IndexInterval grid_cell;
EXPECT_EQ(grid(0, 7, &grid_cell), 7);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(7, 1));
EXPECT_EQ(grid(1, 7, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(6, 2));
EXPECT_EQ(grid(2, 7, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(6, 3));
}
TEST(RegularGridTest, Empty) {
RegularGridRef grid;
EXPECT_EQ(0, grid.rank());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, NoGridDimensions) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
tensorstore::span<const DimensionIndex> grid_output_dimensions;
tensorstore::span<const Index> grid_cell_shape;
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, NoConnectedSets) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_constant(0, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {2};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, StridedSingleSet) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({5})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {2};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
0}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
StridedSingleDimensionSets) {
auto transform = tensorstore::IndexTransformBuilder<>(5, 4)
.input_origin({1, 2, 3, 4, 5})
.input_shape({6, 7, 8, 9, 10})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {2, 0};
const Index grid_cell_shape[] = {5, 10};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
4},
IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({1}),
2}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, DiagonalStridedSet) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({1})
.input_shape({6})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
const Index grid_cell_shape[] = {5, 10};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0, 1}),
0}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, DiagonalStridedSets) {
auto transform = tensorstore::IndexTransformBuilder<>(5, 4)
.input_origin({1, 2, 3, 4, 5})
.input_shape({6, 7, 8, 9, 10})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 4)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 3)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {2, 0, 1};
const Index grid_cell_shape[] = {5, 10, 15};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0, 2}),
4},
IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({1}),
2}));
EXPECT_THAT(partitioned.index_array_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, SingleIndexArrayDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {4};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0}),
DimensionSet::FromIndices({0}),
{1, 3, 5},
MakeArray<Index>({{0}, {3}, {1}, {2}}),
{0, 1, 2}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
IndexArrayAndStridedDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, 3, 5, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {10, 4};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0}),
{0, 1, 0, 5, 1, 3, 1, 5},
MakeArray<Index>({{0}, {1}, {3}, {2}}),
{0, 1, 2, 3}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
IndexArrayAndStridedDimensionIndependent) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {0}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
const Index grid_cell_shape[] = {3, 1};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({1}),
DimensionSet::FromIndices({0}),
{0},
MakeArray<Index>({{0}, {1}}),
{0}}));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
1}));
}
TEST(PrePartitionIndexTransformOverRegularGridTest,
TwoOutputsTwoDimensionalIndexArrays) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({-1, 2})
.input_shape({2, 3})
.output_index_array(0, 5, 2, MakeArray<Index>({{1, 2, 3}, {3, 4, 5}}))
.output_index_array(1, 2, 1, MakeArray<Index>({{5, 9, 1}, {8, 2, 3}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {3, 5};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0, 1}),
{1, 2, 1, 3, 2, 1, 3, 1, 3, 2},
MakeArray<Index>({{-1, 4}, {0, 3}, {0, 4}, {-1, 2}, {-1, 3}, {0, 2}}),
{0, 2, 3, 4, 5}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverRegularGridTest, UnboundedDomain) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({-kInfIndex})
.input_shape({100})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {5};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status,
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 0 has unbounded domain .*"));
}
TEST(PrePartitionIndexTransformOverRegularGridTest, IndexArrayOutOfBounds) {
auto transform = tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({2, 3, 4}),
IndexInterval::Closed(3, 10))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
const Index grid_cell_shape[] = {5};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status,
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 2 is outside valid range \\[3, 11\\)"));
}
TEST(PrePartitionIndexTransformOverRegularGridTest, StridedDimensionOverflow) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, -kInfIndex, -kInfIndex, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
const Index grid_cell_shape[] = {10, 4};
IndexTransformGridPartition partitioned;
auto status = PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, RegularGridRef{grid_cell_shape},
partitioned);
EXPECT_THAT(status, MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(PrePartitionIndexTransformOverGridTest, SingleIndexArrayDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0};
std::vector<Index> dimension0{-1, 5, 10};
IrregularGrid grid{{dimension0}};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0}),
DimensionSet::FromIndices({0}),
{1, 2},
MakeArray<Index>({{0}, {1}, {2}, {3}}),
{0, 1}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverGridTest, IndexArrayAndStridedDimension) {
auto transform =
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({0})
.input_shape({4})
.output_index_array(0, 5, 2, MakeArray<Index>({1, 9, 8, 4}))
.output_single_input_dimension(1, 3, 5, 0)
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
std::vector<Index> dimension0{-1, 6, 9, 15};
std::vector<Index> dimension1{10, 11, 15, 22};
IrregularGrid grid({dimension0, dimension1});
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0}),
{0, -1, 1, 3, 2, 2, 3, 1},
MakeArray<Index>({{0}, {1}, {2}, {3}}),
{0, 1, 2, 3}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
TEST(PrePartitionIndexTransformOverGridTest,
IndexArrayAndStridedDimensionIndependent) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {0}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {0, 1};
std::vector<Index> dimension0{1, 2, 3, 7};
std::vector<Index> dimension1{-1, 0, 2, 4, 5};
IrregularGrid grid({dimension0, dimension1});
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({1}),
DimensionSet::FromIndices({0}),
{1},
MakeArray<Index>({{0}, {1}}),
{0}}));
EXPECT_THAT(partitioned.strided_sets(),
ElementsAre(IndexTransformGridPartition::StridedSet{
DimensionSet::FromIndices({0}),
1}));
}
TEST(PrePartitionIndexTransformOverGridTest,
TwoOutputsTwoDimensionalIndexArrays) {
auto transform =
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({-1, 2})
.input_shape({2, 3})
.output_index_array(0, 5, 2, MakeArray<Index>({{1, 2, 3}, {3, 4, 5}}))
.output_index_array(1, 2, 1, MakeArray<Index>({{5, 9, 1}, {8, 2, 3}}))
.Finalize()
.value();
const DimensionIndex grid_output_dimensions[] = {1, 0};
std::vector<Index> dimension0{1, 2, 3, 7, 10};
std::vector<Index> dimension1{-1, 0, 2, 4, 5, 8};
IrregularGrid grid{{dimension0, dimension1}};
IndexTransformGridPartition partitioned;
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, partitioned));
EXPECT_THAT(
partitioned.index_array_sets(),
ElementsAre(IndexTransformGridPartition::IndexArraySet{
DimensionSet::FromIndices({0, 1}),
DimensionSet::FromIndices({0, 1}),
{2, 5, 3, 4, 4, 5},
MakeArray<Index>({{-1, 4}, {0, 3}, {0, 4}, {-1, 2}, {-1, 3}, {0, 2}}),
{0, 3, 4}}));
EXPECT_THAT(partitioned.strided_sets(), ElementsAre());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition_impl.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition_impl_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b8d8fa37-7a95-4e06-b3a1-4b2054b61d5c | cpp | google/tensorstore | box_difference | tensorstore/internal/box_difference.cc | tensorstore/internal/box_difference_test.cc | #include "tensorstore/internal/box_difference.h"
#include <cassert>
#include <limits>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/integer_overflow.h"
namespace tensorstore {
namespace internal {
namespace {
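// Each dimension of `outer` splits into up to three intervals: the
// intersection with `inner`, the part before it, and the part after it.
// The number of sub-boxes in `outer - inner` is the product of the
// per-dimension part counts minus one, since the box selecting the
// intersection in every dimension is `outer & inner` itself and is
// excluded.  If the boxes are disjoint in any dimension, the difference
// is simply `outer`, a single box.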
Index GetNumSubtractionSubBoxes(BoxView<> outer, BoxView<> inner) {
assert(outer.rank() == inner.rank());
const DimensionIndex rank = outer.rank();
Index total_count = 1;
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer[i];
IndexInterval inner_interval = inner[i];
Index num_parts = 1;
if (Intersect(outer_interval, inner_interval).empty()) {
return 1;
}
if (outer_interval.inclusive_min() < inner_interval.inclusive_min()) {
++num_parts;
}
if (outer_interval.inclusive_max() > inner_interval.inclusive_max()) {
++num_parts;
}
total_count *= num_parts;
}
return total_count - 1;
}
}
BoxDifference::BoxDifference(BoxView<> outer, BoxView<> inner)
: outer_(outer),
inner_(inner),
num_sub_boxes_(GetNumSubtractionSubBoxes(outer, inner)) {}
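// Decodes `sub_box_index` as a mixed-radix number: each dimension
// contributes one digit in [0, num_parts) selecting the intersection (0),
// the part before it, or the part after it.  Index 0 would select the
// intersection in every dimension (which is excluded from the
// difference), hence the initial increment.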
void BoxDifference::GetSubBox(Index sub_box_index, MutableBoxView<> out) const {
const DimensionIndex rank = out.rank();
assert(rank == outer_.rank());
assert(sub_box_index >= 0 && sub_box_index < num_sub_boxes_);
++sub_box_index;
for (DimensionIndex i = 0; i < rank; ++i) {
IndexInterval outer_interval = outer_[i];
IndexInterval inner_interval = inner_[i];
Index num_parts = 1;
IndexInterval intersection = Intersect(outer_interval, inner_interval);
if (intersection.empty()) {
out.DeepAssign(outer_);
return;
}
const bool has_before =
outer_interval.inclusive_min() < inner_interval.inclusive_min();
const bool has_after =
outer_interval.inclusive_max() > inner_interval.inclusive_max();
if (has_before) ++num_parts;
if (has_after) ++num_parts;
const Index part_i = sub_box_index % num_parts;
switch (part_i) {
case 0:
out[i] = intersection;
break;
case 1:
if (has_before) {
out[i] = IndexInterval::UncheckedHalfOpen(
outer_interval.inclusive_min(), inner_interval.inclusive_min());
break;
}
[[fallthrough]];
case 2:
out[i] = IndexInterval::UncheckedHalfOpen(
inner_interval.exclusive_max(), outer_interval.exclusive_max());
break;
}
sub_box_index /= num_parts;
}
}
}
} | #include "tensorstore/internal/box_difference.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::internal::BoxDifference;
std::vector<Box<>> Subtract(BoxView<> outer, BoxView<> inner) {
BoxDifference difference(outer, inner);
Index count = difference.num_sub_boxes();
std::vector<Box<>> boxes(count);
for (Index i = 0; i < count; ++i) {
auto& out = boxes[i];
out.set_rank(outer.rank());
difference.GetSubBox(i, out);
}
return boxes;
}
TEST(BoxDifferenceTest, RankZero) {
EXPECT_THAT(Subtract(BoxView<>(), BoxView<>()),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneEmptyResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({1}, {5})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankOneFullResult) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({6}, {5})),
::testing::UnorderedElementsAre(BoxView({1}, {5})));
}
TEST(BoxDifferenceTest, RankOneBeforeOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({3}, {4})),
::testing::UnorderedElementsAre(BoxView({1}, {2})));
}
TEST(BoxDifferenceTest, RankOneAfterOnly) {
EXPECT_THAT(Subtract(BoxView({1}, {5}), BoxView({0}, {3})),
::testing::UnorderedElementsAre(BoxView({3}, {3})));
}
TEST(BoxDifferenceTest, RankOneBeforeAndAfter) {
EXPECT_THAT(
Subtract(BoxView({1}, {5}), BoxView({2}, {2})),
::testing::UnorderedElementsAre(BoxView({1}, {1}), BoxView({4}, {2})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 2}, {5, 7})),
::testing::UnorderedElementsAre());
}
TEST(BoxDifferenceTest, RankTwoDim0FullDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({6, 2}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Full) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({1, 10}, {5, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {5, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Empty) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {1, 7}),
BoxView({5, 2}, {1, 7})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1Before) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 4}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 3})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1After) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, -1}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {7, 4})));
}
TEST(BoxDifferenceTest, RankTwoDim0EmptyDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({2, 1}, {7, 5}), BoxView({2, 2}, {7, 3})),
::testing::UnorderedElementsAre(BoxView({2, 1}, {7, 1}),
BoxView({2, 5}, {7, 1})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({1, 4}, {3, 5}),
BoxView({4, 2}, {2, 2}),
BoxView({1, 2}, {3, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 4}, {3, 7})),
::testing::UnorderedElementsAre(BoxView({2, 4}, {4, 5}),
BoxView({1, 2}, {1, 2}),
BoxView({2, 2}, {4, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1Before) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 4}, {3, 7})),
::testing::UnorderedElementsAre(
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5}),
BoxView({2, 2}, {3, 2}), BoxView({1, 2}, {1, 2}),
BoxView({5, 2}, {1, 2})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({4, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({1, 2}, {3, 1}),
BoxView({4, 3}, {2, 6}),
BoxView({1, 3}, {3, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0AfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({-1, 2}, {3, 1})),
::testing::UnorderedElementsAre(BoxView({2, 2}, {4, 1}),
BoxView({1, 3}, {1, 6}),
BoxView({2, 3}, {4, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1After) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 2}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 2}, {1, 1}), BoxView({5, 2}, {1, 1}),
BoxView({2, 3}, {3, 6}), BoxView({1, 3}, {1, 6}),
BoxView({5, 3}, {1, 6})));
}
TEST(BoxDifferenceTest, RankTwoDim0BeforeAfterDim1BeforeAfter) {
EXPECT_THAT(Subtract(BoxView({1, 2}, {5, 7}), BoxView({2, 3}, {3, 1})),
::testing::UnorderedElementsAre(
BoxView({1, 3}, {1, 1}), BoxView({5, 3}, {1, 1}),
BoxView({2, 2}, {3, 1}), BoxView({1, 2}, {1, 1}),
BoxView({5, 2}, {1, 1}), BoxView({2, 4}, {3, 5}),
BoxView({1, 4}, {1, 5}), BoxView({5, 4}, {1, 5})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/box_difference.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/box_difference_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
49087145-934b-4828-9aec-9b8a982968fa | cpp | google/tensorstore | parse_json_matches | tensorstore/internal/parse_json_matches.cc | tensorstore/internal/parse_json_matches_test.cc | #include "tensorstore/internal/parse_json_matches.h"
#include <ostream>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
namespace tensorstore {
namespace internal {
namespace {
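// Adapts a ::nlohmann::json matcher to match strings: the string is
// parsed as JSON first, so unparseable input simply fails to match.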
class Matcher : public ::testing::MatcherInterface<std::string> {
public:
Matcher(::testing::Matcher<::nlohmann::json> json_matcher)
: json_matcher_(std::move(json_matcher)) {}
bool MatchAndExplain(
std::string value,
::testing::MatchResultListener* listener) const override {
return json_matcher_.MatchAndExplain(
tensorstore::internal::ParseJson(value), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "when parsed as JSON ";
json_matcher_.DescribeTo(os);
}
private:
::testing::Matcher<::nlohmann::json> json_matcher_;
};
}
::testing::Matcher<std::string> ParseJsonMatches(
::testing::Matcher<::nlohmann::json> json_matcher) {
return ::testing::MakeMatcher(new Matcher(std::move(json_matcher)));
}
::testing::Matcher<std::string> ParseJsonMatches(::nlohmann::json json) {
return ParseJsonMatches(MatchesJson(json));
}
}
} | #include "tensorstore/internal/parse_json_matches.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::internal::ParseJsonMatches;
TEST(ParseJsonMatchesTest, Describe) {
std::ostringstream ss;
ParseJsonMatches(::nlohmann::json(true)).DescribeTo(&ss);
EXPECT_EQ("when parsed as JSON matches json true", ss.str());
}
TEST(ParseJsonMatchesTest, Explain) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(ParseJsonMatches(::nlohmann::json(true)),
"false", &listener);
EXPECT_EQ(
"where the difference is:\n"
"[\n"
" {\n"
" \"op\": \"replace\",\n"
" \"path\": \"\",\n"
" \"value\": false\n"
" }\n"
"]",
listener.str());
}
TEST(ParseJsonMatchesTest, Matches) {
EXPECT_THAT("{\"a\":\"b\"}", ParseJsonMatches(::nlohmann::json{{"a", "b"}}));
EXPECT_THAT("{\"a\":\"b\"}",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("invalid",
::testing::Not(ParseJsonMatches(::nlohmann::json{{"a", "c"}})));
EXPECT_THAT("{\"a\":\"b\"}",
ParseJsonMatches(::testing::Not(::nlohmann::json{{"a", "c"}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/parse_json_matches.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/parse_json_matches_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0dc92b90-ee98-47c9-89c7-7aa60cc39d16 | cpp | google/tensorstore | nditerable_util | tensorstore/internal/nditerable_util.cc | tensorstore/internal/nditerable_util_test.cc | #include "tensorstore/internal/nditerable_util.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
#ifndef NDEBUG
bool nditerable_use_unit_block_size = false;
#endif
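// Computes the iteration layout: dimensions that may be skipped (size 1
// without a required forward direction, or marked kCanSkip) are dropped,
// the remaining dimensions are ordered according to `constraints`, and
// adjacent dimensions are coalesced into a single run whenever the
// iterable reports they can be combined and the combined size does not
// overflow.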
template <bool Full>
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationLayoutInfo<Full>* info) {
info->shape.assign(shape.begin(), shape.end());
info->directions.resize(shape.size());
info->iteration_dimensions.clear();
info->iteration_shape.clear();
if constexpr (Full) {
info->full_iteration_dimensions.clear();
}
info->empty = false;
using DirectionPref = NDIterableLayoutConstraint::DirectionPref;
DirectionPref direction_prefs[kMaxRank];
std::fill_n(
direction_prefs, shape.size(),
constraints.repeated_elements_constraint() == skip_repeated_elements
? DirectionPref::kCanSkip
: DirectionPref::kEither);
iterable.UpdateDirectionPrefs(direction_prefs);
for (DimensionIndex dim_i = 0; dim_i < shape.size(); ++dim_i) {
const Index size = shape[dim_i];
if (size == 0) {
info->empty = true;
} else if ((size == 1 &&
direction_prefs[dim_i] != DirectionPref::kForwardRequired) ||
direction_prefs[dim_i] == DirectionPref::kCanSkip) {
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
continue;
}
info->iteration_dimensions.push_back(dim_i);
}
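  // The iteration machinery below assumes at least two iteration
  // dimensions; pad with dummy (-1) dimensions of extent 1 when fewer
  // remain.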
if (info->iteration_dimensions.empty()) {
info->iteration_dimensions.push_back(-1);
info->iteration_dimensions.push_back(-1);
info->iteration_shape.push_back(1);
info->iteration_shape.push_back(1);
} else {
if (constraints.order_constraint() == ContiguousLayoutOrder::fortran) {
std::reverse(info->iteration_dimensions.begin(),
info->iteration_dimensions.end());
} else if (constraints.order_constraint() == unspecified_order) {
std::sort(info->iteration_dimensions.begin(),
info->iteration_dimensions.end(),
[&](DimensionIndex dim_i, DimensionIndex dim_j) {
return iterable.GetDimensionOrder(dim_i, dim_j) < 0;
});
}
DimensionIndex dim_i = info->iteration_dimensions[0];
Index size_i = shape[dim_i];
info->iteration_shape.push_back(size_i);
int dir_i =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_i]);
info->directions[dim_i] = dir_i;
auto next_iteration_dim_it = info->iteration_dimensions.begin();
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_i);
}
for (DimensionIndex i = 1;
i < static_cast<DimensionIndex>(info->iteration_dimensions.size());
++i) {
DimensionIndex dim_j = info->iteration_dimensions[i];
Index size_j = shape[dim_j];
int dir_j =
NDIterableLayoutConstraint::GetDirection(direction_prefs[dim_j]);
info->directions[dim_j] = dir_j;
if constexpr (Full) {
info->full_iteration_dimensions.push_back(dim_j);
}
Index size_combined;
if (iterable.CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j) &&
!MulOverflow(size_i, size_j, &size_combined)) {
size_j = size_combined;
info->iteration_shape.back() = size_combined;
} else {
info->iteration_shape.push_back(size_j);
++next_iteration_dim_it;
}
*next_iteration_dim_it = dim_j;
dim_i = dim_j;
size_i = size_j;
dir_i = dir_j;
}
info->iteration_dimensions.erase(next_iteration_dim_it + 1,
info->iteration_dimensions.end());
}
if (info->iteration_dimensions.size() < 2) {
assert(info->iteration_dimensions.size() == 1);
info->iteration_dimensions.insert(info->iteration_dimensions.begin(), -1);
info->iteration_shape.insert(info->iteration_shape.begin(), 1);
}
}
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationSimplifiedLayoutInfo* info) {
GetNDIterationLayoutInfo<false>(iterable, shape, constraints, info);
}
void GetNDIterationLayoutInfo(const NDIterableLayoutConstraint& iterable,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
NDIterationFullLayoutInfo* info) {
GetNDIterationLayoutInfo<true>(iterable, shape, constraints, info);
}
IterationBufferShape GetNDIterationBlockShape(
ptrdiff_t working_memory_bytes_per_element,
tensorstore::span<const Index> iteration_shape) {
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
return {1, 1};
#else
#if !defined(NDEBUG)
if (nditerable_use_unit_block_size) {
return {1, 1};
}
#endif
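  // Heuristic: target roughly 24 KiB of working memory per block (but
  // never fewer than 8 elements).  The inner (last-dimension) block size
  // is chosen first, up to the target; any remaining budget widens the
  // block across the penultimate dimension.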
constexpr Index kTargetMemoryUsage = 24 * 1024;
const Index penultimate_dimension_size =
iteration_shape[iteration_shape.size() - 2];
const Index last_dimension_size = iteration_shape[iteration_shape.size() - 1];
if (working_memory_bytes_per_element == 0) {
return {penultimate_dimension_size, last_dimension_size};
} else {
const Index target_size = std::max(
Index(8), kTargetMemoryUsage / Index(working_memory_bytes_per_element));
const Index block_inner_size =
std::max(Index(1), std::min(last_dimension_size, target_size));
Index block_outer_size = 1;
if (block_inner_size < target_size) {
block_outer_size =
std::min(penultimate_dimension_size, target_size / block_inner_size);
}
return {block_outer_size, block_inner_size};
}
#endif
}
IterationBufferShape GetNDIterationBlockShape(
const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout, IterationBufferKind buffer_kind) {
return GetNDIterationBlockShape(
iterable.GetWorkingMemoryBytesPerElement(layout, buffer_kind),
layout.iteration_shape);
}
void GetNDIterationBufferInfo(const NDIterableBufferConstraint& iterable,
NDIterable::IterationLayoutView layout,
NDIterationBufferInfo* buffer_info) {
buffer_info->buffer_kind =
iterable.GetIterationBufferConstraint(layout).min_buffer_kind;
buffer_info->block_shape =
GetNDIterationBlockShape(iterable, layout, buffer_info->buffer_kind);
}
#ifndef NDEBUG
void SetNDIterableTestUnitBlockSize(bool value) {
nditerable_use_unit_block_size = value;
}
#endif
Index UpdatePartialBlock(NDIterator& iterator,
tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferKind buffer_kind,
IterationBufferPointer buffer, Index modified_count,
absl::Status* status) {
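  // Writes back a partially modified block by splitting `modified_count`
  // elements into complete rows of `block_shape[1]` followed by at most
  // one partial final row, issuing a separate UpdateBlock call for each
  // piece.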
Index full_rows = modified_count / block_shape[1];
Index final_row_count = modified_count % block_shape[1];
Index updated = 0;
if (full_rows != 0) {
updated = iterator.UpdateBlock(indices, {full_rows, block_shape[1]}, buffer,
status);
if (ABSL_PREDICT_FALSE(updated != full_rows * block_shape[1])) {
return updated;
}
}
if (final_row_count != 0) {
buffer.AddElementOffset(buffer_kind, full_rows, 0);
Index final_row_indices[kMaxRank];
std::copy(indices.begin(), indices.end(), final_row_indices);
final_row_indices[indices.size() - 2] += full_rows;
updated += iterator.UpdateBlock(
tensorstore::span<const Index>(final_row_indices, indices.size()),
{1, final_row_count}, buffer, status);
}
return updated;
}
}
} | #include "tensorstore/internal/nditerable_util.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::GetNDIterationBlockShape;
using ::tensorstore::internal::NDIterationPositionStepper;
using ::tensorstore::internal::ResetBufferPositionAtBeginning;
using ::tensorstore::internal::ResetBufferPositionAtEnd;
using ::tensorstore::internal::StepBufferPositionBackward;
using ::tensorstore::internal::StepBufferPositionForward;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TEST(GetNDIterationBlockShape, Basic) {
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr auto expected_block_size = [](Index block_size) {
return block_size;
};
#else
constexpr auto expected_block_size = [](Index block_size) { return 1; };
#endif
EXPECT_THAT(
GetNDIterationBlockShape(0,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(expected_block_size(4), expected_block_size(1000000)));
EXPECT_THAT(
GetNDIterationBlockShape(1,
tensorstore::span<const Index>({3, 4, 15})),
ElementsAre(expected_block_size(4), expected_block_size(15)));
EXPECT_THAT(
GetNDIterationBlockShape(1,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(24 * 1024)));
EXPECT_THAT(
GetNDIterationBlockShape(32,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(768)));
EXPECT_THAT(
GetNDIterationBlockShape(64,
tensorstore::span<const Index>({3, 4, 1000000})),
ElementsAre(1, expected_block_size(384)));
}
TEST(ResetBufferPositionTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{42};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0));
ResetBufferPositionAtEnd(shape, 1, position.data());
EXPECT_THAT(position, ElementsAre(9));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(6));
}
TEST(ResetBufferPositionTest, TwoDimensional) {
std::vector<Index> shape{10, 15};
std::vector<Index> position{42, 43};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 11));
}
TEST(ResetBufferPositionTest, ThreeDimensional) {
std::vector<Index> shape{10, 15, 19};
std::vector<Index> position{42, 43, 44};
ResetBufferPositionAtBeginning(position);
EXPECT_THAT(position, ElementsAre(0, 0, 0));
ResetBufferPositionAtEnd(shape, 4, position.data());
EXPECT_THAT(position, ElementsAre(9, 14, 15));
}
TEST(StepBufferPositionForwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(10));
}
TEST(StepBufferPositionForwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{0, 0};
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 8));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 4));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 8));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0));
}
TEST(StepBufferPositionForwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 6};
std::vector<Index> position{0, 0, 0};
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 4));
EXPECT_EQ(4, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(2, StepBufferPositionForward(
shape, 4, 4, position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 4));
EXPECT_EQ(0, StepBufferPositionForward(
shape, 2, 4, position.data()));
EXPECT_THAT(position, ElementsAre(2, 0, 0));
}
TEST(StepBufferPositionBackwardTest, OneDimensional) {
std::vector<Index> shape{10};
std::vector<Index> position{6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0));
}
TEST(StepBufferPositionBackwardTest, TwoDimensional) {
std::vector<Index> shape{2, 10};
std::vector<Index> position{1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0));
}
TEST(StepBufferPositionBackwardTest, ThreeDimensional) {
std::vector<Index> shape{2, 2, 10};
std::vector<Index> position{1, 1, 6};
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(1, 0, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 1, 0));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 6));
EXPECT_EQ(4, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 2));
EXPECT_EQ(2, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
EXPECT_EQ(0, StepBufferPositionBackward(shape, 4,
position.data()));
EXPECT_THAT(position, ElementsAre(0, 0, 0));
}
TEST(NDIterationPositionStepperTest, Forward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{0, 0, 0}, 4}, {{0, 0, 4}, 3},
{{0, 1, 0}, 4}, {{0, 1, 4}, 3},
{{0, 2, 0}, 4}, {{0, 2, 4}, 3},
{{1, 0, 0}, 4}, {{1, 0, 4}, 3},
{{1, 1, 0}, 4}, {{1, 1, 4}, 3},
{{1, 2, 0}, 4}, {{1, 2, 4}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtBeginning(); block_size;
block_size = stepper.StepForward(block_size)) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
TEST(NDIterationPositionStepperTest, Backward) {
std::vector<Index> shape({2, 3, 7});
NDIterationPositionStepper stepper(shape, 4);
EXPECT_THAT(stepper.shape(), ElementsAreArray(shape));
using PositionsAndBlockSizes =
std::vector<std::pair<std::vector<Index>, Index>>;
PositionsAndBlockSizes expected_results{
{{1, 2, 3}, 4}, {{1, 2, 0}, 3},
{{1, 1, 3}, 4}, {{1, 1, 0}, 3},
{{1, 0, 3}, 4}, {{1, 0, 0}, 3},
{{0, 2, 3}, 4}, {{0, 2, 0}, 3},
{{0, 1, 3}, 4}, {{0, 1, 0}, 3},
{{0, 0, 3}, 4}, {{0, 0, 0}, 3},
};
PositionsAndBlockSizes results;
for (Index block_size = stepper.ResetAtEnd(); block_size;
block_size = stepper.StepBackward()) {
results.emplace_back(
std::vector(stepper.position().begin(), stepper.position().end()),
block_size);
}
EXPECT_THAT(results, ElementsAreArray(expected_results));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bf7adb85-df95-490d-9b94-a1483c405e16 | cpp | google/tensorstore | uri_utils | tensorstore/internal/uri_utils.cc | tensorstore/internal/uri_utils_test.cc | #include "tensorstore/internal/uri_utils.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include "absl/strings/ascii.h"
namespace tensorstore {
namespace internal {
namespace {
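// For hex letters ('a'-'f', 'A'-'F') adding 9 makes the low nibble of the
// ASCII code equal the digit value (e.g. 'a' = 0x61, 0x61 + 9 = 0x6A,
// & 0xF == 10); decimal digits already hold their value in the low
// nibble.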
inline int HexDigitToInt(char c) {
assert(absl::ascii_isxdigit(c));
int x = static_cast<unsigned char>(c);
if (x > '9') {
x += 9;
}
return x & 0xf;
}
inline char IntToHexDigit(int x) {
assert(x >= 0 && x < 16);
return "0123456789ABCDEF"[x];
}
}
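// Escapes every byte not in `unreserved` as %XX.  The first pass only
// counts how many bytes need escaping, so the input can be copied through
// unchanged (or the output reserved to its exact final size) before any
// encoding work is done.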
void PercentEncodeReserved(std::string_view src, std::string& dest,
AsciiSet unreserved) {
size_t num_escaped = 0;
for (char c : src) {
if (!unreserved.Test(c)) ++num_escaped;
}
if (num_escaped == 0) {
dest = src;
return;
}
dest.clear();
dest.reserve(src.size() + 2 * num_escaped);
for (char c : src) {
if (unreserved.Test(c)) {
dest += c;
} else {
dest += '%';
dest += IntToHexDigit(static_cast<unsigned char>(c) / 16);
dest += IntToHexDigit(static_cast<unsigned char>(c) % 16);
}
}
}
void PercentDecodeAppend(std::string_view src, std::string& dest) {
dest.reserve(dest.size() + src.size());
for (size_t i = 0; i < src.size();) {
char c = src[i];
char x, y;
if (c != '%' || i + 2 >= src.size() ||
!absl::ascii_isxdigit((x = src[i + 1])) ||
!absl::ascii_isxdigit((y = src[i + 2]))) {
dest += c;
++i;
continue;
}
dest += static_cast<char>(HexDigitToInt(x) * 16 + HexDigitToInt(y));
i += 3;
}
}
ParsedGenericUri ParseGenericUri(std::string_view uri) {
  static constexpr std::string_view kSchemeSep("://");
ParsedGenericUri result;
const auto scheme_start = uri.find(kSchemeSep);
std::string_view uri_suffix;
if (scheme_start == std::string_view::npos) {
uri_suffix = uri;
} else {
result.scheme = uri.substr(0, scheme_start);
uri_suffix = uri.substr(scheme_start + kSchemeSep.size());
}
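  // The fragment is located first, and the query is searched only within
  // the portion before it, so a '?' appearing inside the fragment (as in
  // "#fragment?query") is not treated as a query separator.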
const auto fragment_start = uri_suffix.find('#');
const auto query_start = uri_suffix.substr(0, fragment_start).find('?');
const auto path_end = std::min(query_start, fragment_start);
result.authority_and_path = uri_suffix.substr(0, path_end);
if (const auto path_start = result.authority_and_path.find('/');
path_start == 0 || result.authority_and_path.empty()) {
result.authority = {};
result.path = result.authority_and_path;
} else if (path_start != std::string_view::npos) {
result.authority = result.authority_and_path.substr(0, path_start);
result.path = result.authority_and_path.substr(path_start);
} else {
result.authority = result.authority_and_path;
result.path = {};
}
if (query_start != std::string_view::npos) {
result.query =
uri_suffix.substr(query_start + 1, fragment_start - query_start - 1);
}
if (fragment_start != std::string_view::npos) {
result.fragment = uri_suffix.substr(fragment_start + 1);
}
return result;
}
}
} | #include "tensorstore/internal/uri_utils.h"
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::tensorstore::internal::AsciiSet;
using ::tensorstore::internal::ParseGenericUri;
using ::tensorstore::internal::PercentDecode;
using ::tensorstore::internal::PercentEncodeReserved;
using ::tensorstore::internal::PercentEncodeUriComponent;
using ::tensorstore::internal::PercentEncodeUriPath;
namespace {
TEST(PercentDecodeTest, NoOp) {
std::string_view s = "abcd %zz %%";
EXPECT_THAT(PercentDecode(s), ::testing::Eq(s));
}
TEST(PercentDecodeTest, EscapeSequenceInMiddle) {
EXPECT_THAT(PercentDecode("abc%20efg"), ::testing::Eq("abc efg"));
}
TEST(PercentDecodeTest, EscapeSequenceAtEnd) {
EXPECT_THAT(PercentDecode("abc%20"), ::testing::Eq("abc "));
}
TEST(PercentDecodeTest, EscapeSequenceLetter) {
EXPECT_THAT(PercentDecode("abc%fF"), ::testing::Eq("abc\xff"));
}
TEST(PercentEncodeReservedTest, Basic) {
constexpr AsciiSet kMyUnreservedChars{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789/."};
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"/ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"/01234.56789";
EXPECT_THAT(PercentEncodeReserved(s, kMyUnreservedChars), ::testing::Eq(s));
std::string_view t = "-_!~*'()";
EXPECT_THAT(PercentEncodeReserved(t, kMyUnreservedChars),
::testing::Eq("%2D%5F%21%7E%2A%27%28%29"));
}
TEST(PercentEncodeUriPathTest, NoOp) {
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'():@&=+$,;/";
EXPECT_THAT(PercentEncodeUriPath(s), ::testing::Eq(s));
}
TEST(PercentEncodeUriPathTest, Percent) {
EXPECT_THAT(PercentEncodeUriPath("%"), ::testing::Eq("%25"));
}
TEST(PercentEncodeUriPathTest, NonAscii) {
EXPECT_THAT(PercentEncodeUriPath("\xff"), ::testing::Eq("%FF"));
}
TEST(PercentEncodeUriComponentTest, NoOp) {
std::string_view s =
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.!~*'()";
EXPECT_THAT(PercentEncodeUriComponent(s), ::testing::Eq(s));
}
TEST(PercentEncodeUriComponentTest, Percent) {
EXPECT_THAT(PercentEncodeUriComponent("%"), ::testing::Eq("%25"));
}
TEST(PercentEncodeUriComponentTest, NonAscii) {
EXPECT_THAT(PercentEncodeUriComponent("\xff"), ::testing::Eq("%FF"));
}
TEST(ParseGenericUriTest, PathOnly) {
auto parsed = ParseGenericUri("/abc/def");
EXPECT_EQ("", parsed.scheme);
EXPECT_EQ("/abc/def", parsed.authority_and_path);
EXPECT_EQ("", parsed.authority);
EXPECT_EQ("/abc/def", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, GsScheme) {
  auto parsed = ParseGenericUri("gs://bucket/path");
EXPECT_EQ("gs", parsed.scheme);
EXPECT_EQ("bucket/path", parsed.authority_and_path);
EXPECT_EQ("bucket", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityNoPath) {
  auto parsed = ParseGenericUri("http://host:port");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityRootPath) {
  auto parsed = ParseGenericUri("http://host:port/");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathQuery) {
  auto parsed = ParseGenericUri("http://host:port/path?query");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("query", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathFragment) {
  auto parsed = ParseGenericUri("http://host:port/path#fragment");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("fragment", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathQueryFragment) {
  auto parsed = ParseGenericUri("http://host:port/path?query#fragment");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("query", parsed.query);
EXPECT_EQ("fragment", parsed.fragment);
}
TEST(ParseGenericUriTest, SchemeAuthorityPathFragmentQuery) {
  auto parsed = ParseGenericUri("http://host:port/path#fragment?query");
EXPECT_EQ("http", parsed.scheme);
EXPECT_EQ("host:port/path", parsed.authority_and_path);
EXPECT_EQ("host:port", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("fragment?query", parsed.fragment);
}
TEST(ParseGenericUriTest, S3Scheme) {
  auto parsed = ParseGenericUri("s3://bucket/path");
EXPECT_EQ("s3", parsed.scheme);
EXPECT_EQ("bucket/path", parsed.authority_and_path);
EXPECT_EQ("bucket", parsed.authority);
EXPECT_EQ("/path", parsed.path);
EXPECT_EQ("", parsed.query);
EXPECT_EQ("", parsed.fragment);
}
TEST(ParseGenericUriTest, Basic) {
static constexpr std::pair<std::string_view, std::string_view> kCases[] = {
{"http:
{"http:
{"http:
{"http:
{"http:
{"http:
{"http:
};
for (const auto& [uri, authority] : kCases) {
EXPECT_THAT(ParseGenericUri(uri).authority, ::testing::Eq(authority));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/uri_utils.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/uri_utils_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
01b382fb-eb4b-4c1c-ad70-57e111c10137 | cpp | google/tensorstore | multi_barrier | tensorstore/internal/multi_barrier.cc | tensorstore/internal/multi_barrier_test.cc | #include "tensorstore/internal/multi_barrier.h"
#include <cassert>
namespace tensorstore {
namespace internal {
namespace {
bool IsZero(void* arg) { return *reinterpret_cast<int*>(arg) == 0; }
}
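// `num_threads_` stores the thread count shifted left by one; its low bit
// selects which of the two `blocking_` counters belongs to the current
// phase.  Alternating counters lets the next round start blocking while
// threads from the previous round are still waking up (tracked by
// `asleep_`).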
MultiBarrier::MultiBarrier(int num_threads)
: blocking_{num_threads, 0}, asleep_(0), num_threads_(num_threads << 1) {
assert(num_threads > 0);
}
MultiBarrier::~MultiBarrier() {
absl::MutexLock l(&lock_);
lock_.Await(absl::Condition(IsZero, &asleep_));
}
bool MultiBarrier::Block() {
absl::MutexLock l(&lock_);
int& num_to_block = blocking_[num_threads_ & 1];
num_to_block--;
assert(num_to_block >= 0);
if (num_to_block == 0) {
int num_threads = num_threads_ >> 1;
num_threads_ ^= 1;
blocking_[num_threads_ & 1] = num_threads;
asleep_ = num_threads;
} else {
lock_.Await(absl::Condition(IsZero, &num_to_block));
}
asleep_--;
return asleep_ == 0;
}
}
} | #include "tensorstore/internal/multi_barrier.h"
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorstore/internal/thread/thread.h"
namespace internal = tensorstore::internal;
namespace {
template <typename T>
struct MultiBarrierFixture : public ::testing::Test {};
using NumThreadTypes = ::testing::Types<std::integral_constant<int, 1>,
std::integral_constant<int, 2>,
std::integral_constant<int, 16>>;
TYPED_TEST_SUITE(MultiBarrierFixture, NumThreadTypes);
TYPED_TEST(MultiBarrierFixture, Example) {
constexpr int kIterations = 1000;
constexpr int kNumThreads = TypeParam{}();
internal::MultiBarrier barrier(kNumThreads);
std::atomic<int> winner[kNumThreads] = {};
std::atomic<int> loser[kNumThreads] = {};
internal::Thread threads[kNumThreads];
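  // Exactly one thread per round observes Block() returning true (the
  // last thread to leave the barrier), so across kIterations rounds the
  // winner counts sum to kIterations and the loser counts to
  // kIterations * (kNumThreads - 1).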
for (int i = 0; i < kNumThreads; i++) {
threads[i] = internal::Thread({"sanity"}, [&, id = i]() {
for (int j = 0; j < kIterations; j++) {
if (barrier.Block()) {
winner[id]++;
} else {
loser[id]++;
}
}
});
}
for (auto& thread : threads) {
thread.Join();
}
int sum = 0;
for (auto& x : winner) {
sum += x;
}
EXPECT_EQ(kIterations, sum);
sum = 0;
for (auto& x : loser) {
sum += x;
}
EXPECT_EQ(kIterations * (kNumThreads - 1), sum);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_barrier.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_barrier_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7a0a8077-f919-4e65-aa04-39bfe4a9ae57 | cpp | google/tensorstore | nditerable_copy | tensorstore/internal/nditerable_copy.cc | tensorstore/internal/nditerable_copy_test.cc | #include "tensorstore/internal/nditerable_copy.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
NDIterableCopyManager::NDIterableCopyManager(const NDIterable* input,
const NDIterable* output)
: Base{{{input, output}}} {
assert(input->dtype() == output->dtype());
}
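// Chooses the buffer arrangement for the copy.  A side whose constraint
// is `external` operates on a caller-supplied buffer, so it can reuse the
// other side's buffer (kInput/kOutput); if both sides are external a
// staging buffer is used (kExternal), and if neither is, elements are
// copied between the two iterables' own buffers (kBoth).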
NDIterableCopyManager::BufferParameters
NDIterableCopyManager::GetBufferParameters(
NDIterable::IterationLayoutView layout) const {
BufferParameters result;
auto input_constraint = input()->GetIterationBufferConstraint(layout);
auto output_constraint = output()->GetIterationBufferConstraint(layout);
if (!input_constraint.external || !output_constraint.external) {
result.input_buffer_kind = result.output_buffer_kind = std::max(
input_constraint.min_buffer_kind, output_constraint.min_buffer_kind);
} else {
result.input_buffer_kind = input_constraint.min_buffer_kind;
result.output_buffer_kind = output_constraint.min_buffer_kind;
}
result.buffer_source =
input_constraint.external
? (output_constraint.external ? BufferSource::kExternal
: BufferSource::kOutput)
: (output_constraint.external ? BufferSource::kInput
: BufferSource::kBoth);
return result;
}
std::ptrdiff_t NDIterableCopyManager::GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout) const {
auto buffer_parameters = GetBufferParameters(layout);
std::ptrdiff_t num_bytes = 0;
num_bytes += input()->GetWorkingMemoryBytesPerElement(
layout, buffer_parameters.input_buffer_kind);
num_bytes += output()->GetWorkingMemoryBytesPerElement(
layout, buffer_parameters.output_buffer_kind);
if (buffer_parameters.buffer_source == BufferSource::kExternal) {
num_bytes += input()->dtype()->size;
if (std::max(buffer_parameters.input_buffer_kind,
buffer_parameters.output_buffer_kind) ==
IterationBufferKind::kIndexed) {
num_bytes += sizeof(Index);
}
}
return num_bytes;
}
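// The CopyImpl* functions below implement one strategy per BufferSource
// value: kBoth performs an element-wise copy between the two iterables'
// buffers, kInput/kOutput pass one side's buffer pointer directly to the
// other side, and kExternal stages data through buffers owned by
// `buffer_manager_`.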
bool NDIteratorCopyManager::CopyImplBoth(NDIteratorCopyManager* self,
tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
absl::Status* status) {
IterationBufferPointer input_pointer, output_pointer;
return self->input_->GetBlock(indices, block_shape, &input_pointer, status) &&
self->output_->GetBlock(indices, block_shape, &output_pointer,
status) &&
self->copy_elements_function_(nullptr, block_shape, input_pointer,
output_pointer, status) &&
self->output_->UpdateBlock(indices, block_shape, output_pointer,
status);
}
bool NDIteratorCopyManager::CopyImplInput(
NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) {
IterationBufferPointer pointer;
return self->input_->GetBlock(indices, block_shape, &pointer, status) &&
self->output_->GetBlock(indices, block_shape, &pointer, status) &&
self->output_->UpdateBlock(indices, block_shape, pointer, status);
}
bool NDIteratorCopyManager::CopyImplOutput(
NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) {
IterationBufferPointer pointer;
return self->output_->GetBlock(indices, block_shape, &pointer, status) &&
self->input_->GetBlock(indices, block_shape, &pointer, status) &&
self->output_->UpdateBlock(indices, block_shape, pointer, status);
}
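// Copy strategy for `BufferSource::kExternal`: neither iterator supplies a
// buffer, so both operate on a single separately-allocated buffer managed by
// `buffer_manager_`.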
bool NDIteratorCopyManager::CopyImplExternal(
NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
IterationBufferShape block_shape, absl::Status* status) {
return self->input_->GetBlock(indices, block_shape,
&self->buffer_manager_.buffer_pointers()[0][0],
status) &&
self->output_->GetBlock(indices, block_shape,
&self->buffer_manager_.buffer_pointers()[1][0],
status) &&
self->output_->UpdateBlock(
indices, block_shape,
self->buffer_manager_.buffer_pointers()[1][0], status);
}
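// Obtains iterators for both iterables and selects the copy strategy
// matching the computed buffer source; for `kExternal`, also allocates the
// shared buffer via `buffer_manager_`.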
NDIteratorCopyManager::NDIteratorCopyManager(
const NDIterableCopyManager& iterable,
NDIterable::IterationBufferLayoutView layout, ArenaAllocator<> allocator)
: buffer_manager_(allocator) {
auto buffer_parameters = iterable.GetBufferParameters(layout);
input_ = iterable.input()->GetIterator(
{layout, buffer_parameters.input_buffer_kind});
output_ = iterable.output()->GetIterator(
{layout, buffer_parameters.output_buffer_kind});
switch (buffer_parameters.buffer_source) {
case NDIterableCopyManager::BufferSource::kInput:
copy_impl_ = NDIteratorCopyManager::CopyImplInput;
break;
case NDIterableCopyManager::BufferSource::kOutput:
copy_impl_ = NDIteratorCopyManager::CopyImplOutput;
break;
case NDIterableCopyManager::BufferSource::kBoth:
copy_impl_ = NDIteratorCopyManager::CopyImplBoth;
copy_elements_function_ =
iterable.input()
->dtype()
->copy_assign[buffer_parameters.input_buffer_kind];
break;
case NDIterableCopyManager::BufferSource::kExternal:
copy_impl_ = NDIteratorCopyManager::CopyImplExternal;
buffer_manager_.Initialize(layout.block_shape,
{{iterable.input()->dtype()}},
{{{{buffer_parameters.input_buffer_kind,
buffer_parameters.output_buffer_kind}}}});
break;
}
}
NDIterableCopier::NDIterableCopier(const NDIterable& input,
const NDIterable& output,
tensorstore::span<const Index> shape,
IterationConstraints constraints,
Arena* arena)
: NDIterableCopier(NDIterableCopyManager(&input, &output), shape,
constraints, arena) {}
NDIterableCopier::NDIterableCopier(
const NDIterableCopyManager& iterable_copy_manager,
tensorstore::span<const Index> shape, IterationConstraints constraints,
Arena* arena)
: layout_info_(iterable_copy_manager, shape, constraints),
block_shape_(GetNDIterationBlockShape(
iterable_copy_manager.GetWorkingMemoryBytesPerElement(
layout_info_.layout_view()),
layout_info_.iteration_shape)),
iterator_copy_manager_(iterable_copy_manager,
{layout_info_.layout_view(), block_shape_},
arena) {}
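// Iterates over the iteration shape in blocks, invoking the selected copy
// strategy for each block and returning the first error encountered.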
absl::Status NDIterableCopier::Copy() {
tensorstore::span<const Index> iteration_shape = layout_info_.iteration_shape;
std::fill_n(position_, iteration_shape.size(), static_cast<Index>(0));
if (layout_info_.empty) {
return absl::OkStatus();
}
absl::Status copy_status;
if (Index inner_block_size = block_shape_[1];
inner_block_size != iteration_shape.back()) {
assert(block_shape_[0] == 1);
for (Index block_size = inner_block_size; block_size;) {
if (!iterator_copy_manager_.Copy(
tensorstore::span<const Index>(position_, iteration_shape.size()),
{1, block_size}, ©_status)) {
return GetElementCopyErrorStatus(std::move(copy_status));
}
block_size = StepBufferPositionForward(iteration_shape, block_size,
inner_block_size, position_);
}
} else {
const Index outer_block_size = block_shape_[0];
for (Index block_size = outer_block_size; block_size;) {
if (!iterator_copy_manager_.Copy(
tensorstore::span<const Index>(position_, iteration_shape.size()),
{block_size, inner_block_size}, ©_status)) {
return GetElementCopyErrorStatus(std::move(copy_status));
}
block_size = StepBufferPositionForward(
iteration_shape.first(iteration_shape.size() - 1), block_size,
outer_block_size, position_);
}
}
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/nditerable_copy.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::Shared;
using ::tensorstore::internal::GetElementwiseInputTransformNDIterable;
using ::tensorstore::internal::GetElementwiseOutputTransformNDIterable;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
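// Demonstrates that a copy stops with an error when the output elementwise
// transform fails partway through (here, on encountering the value 5).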
TEST(NDIterableCopyTest, Example) {
auto source_array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto dest_array = tensorstore::AllocateArray<int>(
{2, 3}, tensorstore::c_order, tensorstore::value_init);
auto dest_element_transform = [](const int* source, int* dest, void* arg) {
auto* status = static_cast<absl::Status*>(arg);
if (*source == 5) {
*status = absl::UnknownError("5");
return false;
}
*dest = *source;
return true;
};
tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
tensorstore::internal::SimpleElementwiseFunction<
decltype(dest_element_transform)(const int, int),
void*>::Closure(&dest_element_transform);
tensorstore::internal::Arena arena;
auto source_iterable =
GetTransformedArrayNDIterable(source_array, &arena).value();
auto dest_iterable = GetElementwiseOutputTransformNDIterable(
GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<int>,
dest_closure, &arena);
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *dest_iterable, dest_array.shape(),
tensorstore::c_order, &arena);
EXPECT_EQ(absl::UnknownError("5"), copier.Copy());
EXPECT_EQ(MakeArray<int>({{1, 2, 3}, {4, 0, 0}}), dest_array);
}
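// Copies `source_array` to `dest_array` through an intermediate element
// type, applying elementwise transforms on both sides; with transforms on
// both the input and output, this exercises the external-buffer copy path.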
template <typename IntermediateElement, typename SourceArray,
typename SourceElementTransform, typename DestElementTransform,
typename DestArray>
absl::Status TestCopy(tensorstore::IterationConstraints constraints,
SourceArray source_array,
SourceElementTransform source_element_transform,
DestElementTransform dest_element_transform,
DestArray dest_array) {
tensorstore::internal::Arena arena;
tensorstore::internal::ElementwiseClosure<2, void*> source_closure =
tensorstore::internal::SimpleElementwiseFunction<
SourceElementTransform(typename SourceArray::Element,
IntermediateElement),
void*>::Closure(&source_element_transform);
tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
tensorstore::internal::SimpleElementwiseFunction<
DestElementTransform(IntermediateElement,
typename DestArray::Element),
void*>::Closure(&dest_element_transform);
auto source_iterable = GetElementwiseInputTransformNDIterable(
{{GetTransformedArrayNDIterable(source_array, &arena).value()}},
dtype_v<IntermediateElement>, source_closure, &arena);
auto dest_iterable = GetElementwiseOutputTransformNDIterable(
GetTransformedArrayNDIterable(dest_array, &arena).value(),
dtype_v<IntermediateElement>, dest_closure, &arena);
return tensorstore::internal::NDIterableCopier(
*source_iterable, *dest_iterable, dest_array.shape(), constraints,
&arena)
.Copy();
}
TEST(NDIterableCopyTest, ExternalBuffer) {
for (const bool indexed_source : {false, true}) {
for (const bool indexed_dest : {false, true}) {
SCOPED_TRACE(absl::StrCat("indexed_source=", indexed_source,
", indexed_dest=", indexed_dest)
.c_str());
auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
tensorstore::TransformedArray<Shared<const int>> tsource = source;
if (indexed_source) {
tsource = (source |
tensorstore::Dims(0, 1).OuterIndexArraySlice(
MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
.value();
}
auto dest = tensorstore::AllocateArray<double>(source.shape());
tensorstore::TransformedArray<Shared<double>> tdest = dest;
if (indexed_dest) {
tdest =
(dest | tensorstore::Dims(0, 1).OuterIndexArraySlice(
MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
.value();
}
EXPECT_EQ(absl::OkStatus(),
(TestCopy<unsigned int>(
{}, tsource,
[](const int* source, unsigned int* dest, void* status) {
*dest = *source * 2;
},
[](const unsigned int* source, double* dest, void* status) {
*dest = *source + 100.0;
},
tdest)));
EXPECT_EQ(tensorstore::MakeArray<double>(
{{102.0, 104.0, 106.0}, {108.0, 110.0, 112.0}}),
dest);
}
}
}
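// Fixture that optionally forces a unit block size (debug builds only) to
// exercise the block-stepping logic with many small blocks.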
class MaybeUnitBlockSizeTest : public ::testing::TestWithParam<bool> {
public:
MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
tensorstore::internal::SetNDIterableTestUnitBlockSize(GetParam());
#endif
}
~MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
tensorstore::internal::SetNDIterableTestUnitBlockSize(false);
#endif
}
};
INSTANTIATE_TEST_SUITE_P(NormalBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(false));
#ifndef NDEBUG
INSTANTIATE_TEST_SUITE_P(UnitBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(true));
#endif
TEST_P(MaybeUnitBlockSizeTest, InnerIndexArray) {
constexpr size_t length = 5000;
auto source = tensorstore::AllocateArray<int>({length});
auto dest = tensorstore::AllocateArray<int>({length});
auto expected = tensorstore::AllocateArray<int>({length});
auto indices = tensorstore::AllocateArray<int64_t>({length});
for (int i = 0; i < length; ++i) {
source(i) = -i;
dest(i) = 42;
indices(i) = length - 1 - i;
expected(i) = -(length - 1 - i);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
tensorstore::TransformedArray<Shared<const int>> tsource,
source | tensorstore::Dims(0).IndexArraySlice(indices));
tensorstore::TransformedArray<Shared<int>> tdest = dest;
tensorstore::internal::Arena arena;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto source_iterable, GetTransformedArrayNDIterable(tsource, &arena));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto dest_iterable, GetTransformedArrayNDIterable(tdest, &arena));
TENSORSTORE_ASSERT_OK(tensorstore::internal::NDIterableCopier(
*source_iterable, *dest_iterable, dest.shape(),
{}, &arena)
.Copy());
EXPECT_EQ(expected, dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_copy.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_copy_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
dc72981d-1b15-4aa9-9f6b-e243a84e6ac8 | cpp | google/tensorstore | nditerable_array | tensorstore/internal/nditerable_array.cc | tensorstore/internal/nditerable_array_test.cc | #include "tensorstore/internal/nditerable_array.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
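// Computes the base byte offset of the iteration origin (accounting for
// dimensions iterated in reverse) and fills `byte_strides` with the
// direction-adjusted strides permuted into iteration order.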
Index ComputeIteratorBaseOffsetAndByteStrides(
NDIterable::IterationLayoutView layout,
tensorstore::span<const Index> orig_byte_strides, Index* byte_strides) {
assert(layout.full_rank() == orig_byte_strides.size());
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
const int dir = layout.directions[dim];
if (dir == -1) {
base_offset = wrap_on_overflow::Add(
base_offset, wrap_on_overflow::Multiply(layout.shape[dim] - 1,
orig_byte_strides[dim]));
}
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
byte_strides[i] = 0;
} else {
byte_strides[i] = orig_byte_strides[dim] * layout.directions[dim];
}
}
return base_offset;
}
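// Strided iterator over an array.  A fixed `Rank` stores the byte strides
// inline in a `std::array`, while `Rank == -1` falls back to an
// arena-allocated vector.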
template <DimensionIndex Rank>
class StridedIteratorImpl;
template <DimensionIndex Rank = -1>
class StridedIteratorImplBase
: public NDIterator::Base<StridedIteratorImpl<Rank>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
protected:
ArenaAllocator<> allocator_;
std::array<Index, Rank> byte_strides_;
};
template <>
class StridedIteratorImplBase<-1>
: public NDIterator::Base<StridedIteratorImpl<-1>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: byte_strides_(rank, allocator) {}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
protected:
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
template <DimensionIndex Rank = -1>
class StridedIteratorImpl : public StridedIteratorImplBase<Rank> {
using Base = StridedIteratorImplBase<Rank>;
using Base::byte_strides_;
public:
StridedIteratorImpl(ByteStridedPointer<void> data,
tensorstore::span<const Index> orig_byte_strides,
NDIterable::IterationLayoutView layout,
ArenaAllocator<> allocator)
: Base(layout.iteration_rank(), allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, byte_strides_.data());
}
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
Index offset;
if constexpr (Rank == -1) {
offset = IndexInnerProduct(indices.size(), byte_strides_.data(),
indices.data());
} else {
offset = IndexInnerProduct<Rank>(byte_strides_.data(), indices.data());
}
*pointer = IterationBufferPointer{data_ + offset,
byte_strides_[byte_strides_.size() - 2],
byte_strides_[byte_strides_.size() - 1]};
return true;
}
private:
ByteStridedPointer<void> data_;
};
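// Iterator used for `IterationBufferKind::kIndexed`: a single arena-allocated
// buffer holds the per-dimension byte strides followed by a precomputed byte
// offset for every element of a block.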
class IndexedIteratorImpl : public NDIterator::Base<IndexedIteratorImpl> {
public:
IndexedIteratorImpl(ByteStridedPointer<void> data,
tensorstore::span<const Index> orig_byte_strides,
NDIterable::IterationBufferLayoutView layout,
ArenaAllocator<> allocator)
: block_inner_size_(layout.block_shape[1]),
buffer_(layout.iteration_rank() +
layout.block_shape[0] * layout.block_shape[1],
allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, buffer_.data());
FillOffsetsArrayFromStride(buffer_[layout.iteration_rank() - 2],
buffer_[layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1],
buffer_.data() + layout.iteration_rank());
}
ArenaAllocator<> get_allocator() const override {
return buffer_.get_allocator();
}
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
*pointer = IterationBufferPointer{
data_ +
IndexInnerProduct(indices.size(), buffer_.data(), indices.data()),
block_inner_size_, buffer_.data() + indices.size()};
return true;
}
private:
ByteStridedPointer<void> data_;
Index block_inner_size_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
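// NDIterable implementation backed by a strided array; iteration order
// preferences and dimension-combining decisions are derived from the byte
// strides.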
class ArrayIterableImpl : public NDIterable::Base<ArrayIterableImpl> {
public:
ArrayIterableImpl(SharedOffsetArrayView<const void> array,
ArenaAllocator<> allocator)
: dtype_(array.dtype()),
byte_strides_(array.byte_strides().begin(), array.byte_strides().end(),
allocator) {
void* origin_pointer =
const_cast<void*>(array.byte_strided_origin_pointer().get());
data_ = std::shared_ptr<void>(std::move(array.pointer()), origin_pointer);
}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return GetDimensionOrderFromByteStrides(byte_strides_[dim_i],
byte_strides_[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
UpdateDirectionPrefsFromByteStrides(byte_strides_, prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return CanCombineStridedArrayDimensions(
byte_strides_[dim_i], dir_i, byte_strides_[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex last_dim = layout.iteration_dimensions.back();
return {(last_dim == -1 ||
(byte_strides_[last_dim] * layout.directions[last_dim] ==
dtype_->size))
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
IterationBufferKindLayoutView layout) const override {
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
return MakeUniqueWithVirtualIntrusiveAllocator<IndexedIteratorImpl>(
get_allocator(), data_.get(), byte_strides_, layout);
}
const auto make_strided_iterator = [&](auto rank) {
return MakeUniqueWithVirtualIntrusiveAllocator<
StridedIteratorImpl<decltype(rank)::value>>(
get_allocator(), data_.get(), byte_strides_, layout);
};
switch (layout.iteration_rank()) {
#ifndef TENSORSTORE_NDITERABLE_DISABLE_ARRAY_OPTIMIZE
case 2:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 2>{});
case 3:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 3>{});
#endif
default:
assert(layout.iteration_rank() > 1);
return make_strided_iterator(
std::integral_constant<DimensionIndex, -1>{});
}
}
private:
std::shared_ptr<void> data_;
DataType dtype_;
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
}
NDIterable::Ptr GetArrayNDIterable(SharedOffsetArrayView<const void> array,
Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<ArrayIterableImpl>(
ArenaAllocator<>(arena), std::move(array));
}
}
} | #include "tensorstore/internal/nditerable_array.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using DirectionPref = NDIterable::DirectionPref;
TEST(NDIterableArrayTest, Direct) {
uint8_t data[1000];
Array<uint8_t> array(data + 500,
StridedLayout<>({6, 3, 4, 5}, {-1, -6, 0, 3}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
{
std::vector<DirectionPref> direction_prefs(4, DirectionPref::kCanSkip);
iterable->UpdateDirectionPrefs(direction_prefs.data());
EXPECT_THAT(direction_prefs,
::testing::ElementsAre(
DirectionPref::kBackward, DirectionPref::kBackward,
DirectionPref::kCanSkip, DirectionPref::kForward));
}
EXPECT_GT(iterable->GetDimensionOrder(0, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(0, 2), 0);
EXPECT_GT(iterable->GetDimensionOrder(0, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 0), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 2), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 3), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(3, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 2), 0);
EXPECT_TRUE(iterable->CanCombineDimensions(1, 1,
0, 1,
6));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, 1,
5));
EXPECT_TRUE(iterable->CanCombineDimensions(3, 1,
0, -1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(3, -1,
0, 1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
3, 1,
2));
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({1, 1, 1, 1}),
tensorstore::span<const DimensionIndex>({0, 1, 2, 3}),
tensorstore::span<const Index>({6, 3, 4, 5})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({1, 1, 1, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kContiguous, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kContiguous));
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kStrided));
EXPECT_EQ(
sizeof(Index),
iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kIndexed));
{
auto iterator = iterable->GetIterator(
{{{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kContiguous});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(tensorstore::span<const Index>({2, 3, 1}),
{1, 3}, &pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_EQ(1, pointer.inner_byte_stride);
EXPECT_EQ(absl::OkStatus(), status);
}
{
auto iterator = iterable->GetIterator(
{{{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kIndexed});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(tensorstore::span<const Index>({2, 3, 1}),
{1, 3}, &pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_THAT(tensorstore::span<const Index>(pointer.byte_offsets, 3),
::testing::ElementsAre(0, 1, 2));
EXPECT_EQ(absl::OkStatus(), status);
}
}
TEST(NDIterableArrayTest, RankZero) {
auto array = tensorstore::MakeScalarArray<int>(5);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
tensorstore::span<const Index>{}, {}, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, -1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre());
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.ResetAtBeginning(), ::testing::ElementsAre(1, 1));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, 1}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
TENSORSTORE_EXPECT_OK(status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(0, multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 1}), ::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr Index ExpectedBlockSize(Index block_size) { return block_size; }
#else
constexpr Index ExpectedBlockSize(Index block_size) { return 1; }
#endif
TEST(NDIterableArrayTest, RankOne) {
auto array = tensorstore::MakeArray<int>({1, 2, 3, 4, 5});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
tensorstore::span<const Index>({5}), {}, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(5)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(5)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 5}), ::testing::ElementsAre(0, 5));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoContiguous) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(6)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(6)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(6)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 6}), ::testing::ElementsAre(0, 6));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoTranspose) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::fortran_order, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({3, 2}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int) * 3,
multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({3, 2}), ::testing::ElementsAre(0, 2));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(3, 0));
}
TEST(NDIterableArrayTest, SkipSize1Dimension) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150],
StridedLayout<>({2, 1, 3}, {5, 10, -20})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 1, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0, -1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
}
TEST(NDIterableArrayTest, SkipZeroByteStride) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150], StridedLayout<>({2, 3}, {5, 0})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, FortranOrderArray) {
auto array =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, ReversedDimensions) {
auto orig_array = tensorstore::AllocateArray<int>({3, 4, 5});
auto orig_shape = orig_array.shape();
auto orig_strides = orig_array.byte_strides();
Array<int> array(
&orig_array(0, 4 - 1, 5 - 1),
StridedLayout<>({orig_shape[2], orig_shape[0], orig_shape[1]},
{-orig_strides[2], orig_strides[0], -orig_strides[1]}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5, 3, 4));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(-1, 1, -1));
EXPECT_THAT(multi_iterator.iteration_shape,
::testing::ElementsAre(1, 3 * 4 * 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_TRUE(
multi_iterator.GetBlock({1, ExpectedBlockSize(3 * 4 * 5)}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(orig_array.byte_strided_pointer(),
multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
}
TEST(NDIterableArrayTest, MultipleArrays) {
auto array_a = tensorstore::AllocateArray<int>({2, 3}, tensorstore::c_order);
auto array_b =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable_a = GetArrayNDIterable(array_a, &arena);
auto iterable_b = GetArrayNDIterable(array_b, &arena);
MultiNDIterator<2, true> multi_iterator(
array_a.shape(), tensorstore::skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({2, 3}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array_a(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(&array_b(0, 0), multi_iterator.block_pointers()[1].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_EQ(sizeof(int) * 2,
multi_iterator.block_pointers()[1].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({2, 3}), ::testing::ElementsAre(0, 3));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(2, 0));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4d9dae04-f06a-4c96-841d-131f992ed614 | cpp | google/tensorstore | ref_counted_string | tensorstore/internal/ref_counted_string.cc | tensorstore/internal/ref_counted_string_test.cc | #include "tensorstore/internal/ref_counted_string.h"
#include <cstddef>
#include <cstring>
#include <string_view>
#include <new>
namespace tensorstore {
namespace internal {
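// Increment the new reference before releasing the old one so that
// self-assignment is handled correctly.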
RefCountedString& RefCountedString::operator=(
const RefCountedString& other) noexcept {
if (other.data_) other.header().IncrementReferenceCount();
if (data_) header().DecrementReferenceCount();
data_ = other.data_;
return *this;
}
RefCountedString& RefCountedString::operator=(std::string_view s) {
auto* data = AllocateCopy(s);
if (data_) header().DecrementReferenceCount();
data_ = data;
return *this;
}
RefCountedString& RefCountedString::operator=(const char* s) {
return *this = std::string_view(s);
}
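// Allocates storage for `size` bytes of string data preceded by a reference
// count header in a single allocation, returning a pointer to the data.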
char* RefCountedString::Allocate(size_t size) {
if (size == 0) return nullptr;
void* ptr = ::operator new(size + sizeof(Header));
new (ptr) Header{size};
return static_cast<char*>(ptr) + sizeof(Header);
}
const char* RefCountedString::AllocateCopy(std::string_view s) {
if (s.empty()) return nullptr;
char* data = Allocate(s.size());
std::memcpy(data, s.data(), s.size());
return data;
}
void RefCountedString::Header::Deallocate() const {
::operator delete(const_cast<Header*>(this), length + sizeof(Header));
}
}
} | #include "tensorstore/internal/ref_counted_string.h"
#include <cstring>
#include <string>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::RefCountedString;
using ::tensorstore::internal::RefCountedStringWriter;
TEST(RefCountedStringTest, DefaultConstruct) {
RefCountedString s;
EXPECT_EQ("", std::string_view(s));
EXPECT_EQ("", std::string(s));
EXPECT_TRUE(s.empty());
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0, s.size());
EXPECT_EQ(nullptr, s.begin());
EXPECT_EQ(nullptr, s.end());
EXPECT_EQ(s, s);
auto other = s;
EXPECT_EQ(nullptr, other.data());
}
TEST(RefCountedStringTest, EmptyStringConstruct) {
RefCountedString s("");
EXPECT_EQ("", std::string_view(s));
EXPECT_EQ("", std::string(s));
EXPECT_TRUE(s.empty());
EXPECT_EQ(nullptr, s.data());
EXPECT_EQ(0, s.size());
EXPECT_EQ(nullptr, s.begin());
EXPECT_EQ(nullptr, s.end());
EXPECT_EQ(s, s);
}
TEST(RefCountedStringTest, NonEmptyStringConstruct) {
RefCountedString s("abc");
EXPECT_EQ("abc", std::string_view(s));
EXPECT_EQ("abc", std::string(s));
EXPECT_FALSE(s.empty());
EXPECT_EQ(3, s.size());
EXPECT_EQ("abc", s);
EXPECT_NE("abd", s);
EXPECT_EQ(s, "abc");
EXPECT_LT("ab", s);
EXPECT_LE("abc", s);
EXPECT_GT("abd", s);
}
TEST(RefCountedStringTest, Copy) {
RefCountedString x("abc");
RefCountedString y = x;
EXPECT_EQ(x.data(), y.data());
}
TEST(RefCountedStringTest, Move) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, EmptyMoveAssignNonEmpty) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y;
y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, EmptyMoveAssignEmpty) {
RefCountedString x;
RefCountedString y;
y = std::move(x);
EXPECT_TRUE(y.empty());
EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignNonEmpty) {
RefCountedString x("abc");
const char* ptr = x.data();
RefCountedString y("def");
y = std::move(x);
EXPECT_EQ(y, "abc");
EXPECT_EQ(ptr, y.data());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignEmpty) {
RefCountedString x;
RefCountedString y("def");
y = std::move(x);
EXPECT_TRUE(y.empty());
}
TEST(RefCountedStringTest, NonEmptyCopyAssignNonEmpty) {
RefCountedString x("abc");
RefCountedString y("def");
y = x;
EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignNonEmpty) {
RefCountedString x("abc");
RefCountedString y;
y = x;
EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, NonEmptyCopyAssignEmpty) {
RefCountedString x;
RefCountedString y("def");
y = x;
EXPECT_EQ("", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignEmpty) {
RefCountedString x;
RefCountedString y;
y = x;
EXPECT_EQ("", y);
}
TEST(RefCountedStringTest, NonEmptyAssignFromStringView) {
RefCountedString x("def");
x = std::string_view("abc");
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromStringView) {
RefCountedString x;
x = std::string_view("abc");
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, NonEmptyAssignFromCStr) {
RefCountedString x("def");
x = "abc";
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromCStr) {
RefCountedString x;
x = "abc";
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, SelfAssign) {
RefCountedString x("abc");
x = x;
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, SelfAssignStringView) {
RefCountedString x("abc");
x = std::string_view(x);
EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, Comparison) {
RefCountedString a("abc");
RefCountedString a1("abc");
std::string_view a_sv = "abc";
const char* a_cstr = "abc";
RefCountedString b("def");
std::string_view b_sv = "def";
const char* b_cstr = "def";
EXPECT_TRUE(a == a);
EXPECT_TRUE(a == a1);
EXPECT_TRUE(a == a_sv);
EXPECT_TRUE(a == a_cstr);
EXPECT_TRUE(a_sv == a);
EXPECT_TRUE(a_cstr == a);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a != a1);
EXPECT_FALSE(a != a_sv);
EXPECT_FALSE(a != a_cstr);
EXPECT_FALSE(a_sv != a);
EXPECT_FALSE(a_cstr != a);
EXPECT_TRUE(a <= a);
EXPECT_TRUE(a <= a_sv);
EXPECT_TRUE(a <= a_cstr);
EXPECT_TRUE(a_sv <= a);
EXPECT_TRUE(a_cstr <= a);
EXPECT_TRUE(a <= a1);
EXPECT_TRUE(a >= a);
EXPECT_TRUE(a >= a_sv);
EXPECT_TRUE(a >= a_cstr);
EXPECT_TRUE(a_sv >= a);
EXPECT_TRUE(a_cstr >= a);
EXPECT_TRUE(a >= a1);
EXPECT_TRUE(a <= b);
EXPECT_TRUE(a <= b_sv);
EXPECT_TRUE(a <= b_cstr);
EXPECT_TRUE(a_sv <= b);
EXPECT_TRUE(a_cstr <= b);
EXPECT_TRUE(a <= b_sv);
EXPECT_TRUE(a <= b_cstr);
EXPECT_TRUE(a < b);
EXPECT_TRUE(a < b_sv);
EXPECT_TRUE(a < b_cstr);
EXPECT_TRUE(a_sv < b);
EXPECT_TRUE(a_cstr < b);
EXPECT_FALSE(a > b);
EXPECT_FALSE(a_sv > b);
EXPECT_FALSE(a_cstr > b);
EXPECT_FALSE(a > b_sv);
EXPECT_FALSE(a > b_cstr);
EXPECT_FALSE(a >= b);
EXPECT_FALSE(a >= b_sv);
EXPECT_FALSE(a >= b_cstr);
EXPECT_FALSE(a_sv >= b);
EXPECT_FALSE(a_cstr >= b);
}
TEST(RefCountedStringTest, StdStringConversion) {
std::string s = static_cast<std::string>(RefCountedString("abc"));
EXPECT_EQ("abc", s);
}
TEST(RefCountedStringTest, Indexing) {
RefCountedString x = "abc";
EXPECT_EQ('a', x[0]);
EXPECT_EQ('c', x[2]);
}
TEST(RefCountedStringTest, Writer) {
RefCountedStringWriter writer(3);
memcpy(writer.data(), "abc", 3);
RefCountedString s = std::move(writer);
EXPECT_EQ("abc", s);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/ref_counted_string.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/ref_counted_string_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1ab4b368-c54e-4004-8966-7e456cb6d6b5 | cpp | google/tensorstore | json_pointer | tensorstore/internal/json_pointer.cc | tensorstore/internal/json_pointer_test.cc | #include "tensorstore/internal/json_pointer.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <iterator>
#include <string>
#include <string_view>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace json_pointer {
absl::Status Validate(std::string_view s) {
if (s.empty()) {
return absl::OkStatus();
}
const auto parse_error = [&](const auto&... message) {
return absl::InvalidArgumentError(
tensorstore::StrCat(message..., ": ", tensorstore::QuoteString(s)));
};
if (s[0] != '/') {
return parse_error("JSON Pointer does not start with '/'");
}
for (size_t i = 1; i < s.size(); ++i) {
if (s[i] != '~') continue;
if (i + 1 == s.size() || (s[i + 1] != '0' && s[i + 1] != '1')) {
return parse_error(
"JSON Pointer requires '~' to be followed by '0' or '1'");
}
++i;
}
return absl::OkStatus();
}
namespace {
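// Decodes the character following '~' in an escape sequence: "~0" decodes to
// '~' and "~1" decodes to '/'.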
unsigned char DecodeEscape(char x) {
assert(x == '0' || x == '1');
return x == '0' ? '~' : '/';
}
void DecodeReferenceToken(std::string_view encoded_token, std::string& output) {
output.clear();
output.reserve(encoded_token.size());
for (size_t i = 0; i < encoded_token.size(); ++i) {
char c = encoded_token[i];
switch (c) {
case '~':
++i;
assert(i != encoded_token.size());
output += DecodeEscape(encoded_token[i]);
break;
default:
output += c;
}
}
}
}
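// Compares two validated JSON Pointers by their decoded reference tokens,
// additionally reporting when one pointer is a prefix of (i.e. contains) the
// other.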
CompareResult Compare(std::string_view a, std::string_view b) {
const size_t mismatch_index = std::distance(
a.begin(), std::mismatch(a.begin(), a.end(), b.begin(), b.end()).first);
if (mismatch_index == a.size()) {
if (mismatch_index == b.size()) return kEqual;
if (b[mismatch_index] == '/') {
return kContains;
}
return kLessThan;
}
if (mismatch_index == b.size()) {
if (a[mismatch_index] == '/') {
return kContainedIn;
}
return kGreaterThan;
}
if (a[mismatch_index] == '/') {
return kLessThan;
}
if (b[mismatch_index] == '/') {
return kGreaterThan;
}
  unsigned char a_char, b_char;
  // Both pointers are valid and non-empty here, so they begin with '/' and
  // the first mismatch cannot be at index 0.
  assert(mismatch_index > 0);
  if (a[mismatch_index - 1] == '~') {
    a_char = DecodeEscape(a[mismatch_index]);
    b_char = DecodeEscape(b[mismatch_index]);
} else {
if (a[mismatch_index] == '~') {
assert(mismatch_index + 1 < a.size());
a_char = DecodeEscape(a[mismatch_index + 1]);
} else {
a_char = a[mismatch_index];
}
if (b[mismatch_index] == '~') {
assert(mismatch_index + 1 < b.size());
b_char = DecodeEscape(b[mismatch_index + 1]);
} else {
b_char = b[mismatch_index];
}
}
return a_char < b_char ? kLessThan : kGreaterThan;
}
std::string EncodeReferenceToken(std::string_view token) {
std::string result;
result.reserve(token.size());
for (char c : token) {
switch (c) {
case '~':
result += {'~', '0'};
break;
case '/':
result += {'~', '1'};
break;
default:
result += c;
}
}
return result;
}
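// Walks `full_value` one reference token at a time.  Behavior at missing
// members depends on `mode`: `kMustExist` fails, `kCreate` inserts discarded
// placeholders, `kSimulateCreate` returns nullptr without modifying the
// value, and `kDelete` removes the referenced leaf.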
Result<::nlohmann::json*> Dereference(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode) {
if (sub_value_pointer.empty()) {
if (full_value.is_discarded()) {
if (mode == kMustExist) {
return absl::NotFoundError("");
}
if (mode == kDelete) {
return nullptr;
}
}
return &full_value;
}
assert(sub_value_pointer[0] == '/');
size_t i = 1;
auto* sub_value = &full_value;
std::string decoded_reference_token;
while (true) {
if (sub_value->is_discarded()) {
switch (mode) {
case kMustExist:
return absl::NotFoundError("");
case kCreate:
*sub_value = ::nlohmann::json::object_t();
break;
case kSimulateCreate:
case kDelete:
return nullptr;
}
}
size_t pointer_component_end = sub_value_pointer.find('/', i);
const bool is_leaf = pointer_component_end == std::string_view::npos;
const auto quoted_pointer = [&] {
return tensorstore::QuoteString(
sub_value_pointer.substr(0, pointer_component_end));
};
std::string_view pointer_component =
sub_value_pointer.substr(i, pointer_component_end - i);
if (auto* j_obj = sub_value->get_ptr<::nlohmann::json::object_t*>()) {
DecodeReferenceToken(pointer_component, decoded_reference_token);
if (mode == kCreate) {
sub_value = &j_obj
->emplace(decoded_reference_token,
::nlohmann::json::value_t::discarded)
.first->second;
} else if (mode == kDelete && is_leaf) {
j_obj->erase(decoded_reference_token);
return nullptr;
} else {
auto it = j_obj->find(decoded_reference_token);
if (it == j_obj->end()) {
switch (mode) {
case kSimulateCreate:
case kDelete:
return nullptr;
case kMustExist:
return absl::NotFoundError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" refers to non-existent object member"));
case kCreate:
ABSL_UNREACHABLE();
}
}
sub_value = &it->second;
}
} else if (auto* j_array =
sub_value->get_ptr<::nlohmann::json::array_t*>()) {
if (pointer_component == "-") {
switch (mode) {
case kMustExist:
return absl::FailedPreconditionError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" refers to non-existent array element"));
case kCreate:
sub_value =
&j_array->emplace_back(::nlohmann::json::value_t::discarded);
break;
case kSimulateCreate:
case kDelete:
return nullptr;
}
} else {
size_t array_index;
if (pointer_component.empty() ||
std::any_of(pointer_component.begin(), pointer_component.end(),
[](char c) { return !absl::ascii_isdigit(c); }) ||
(pointer_component.size() > 1 && pointer_component[0] == '0') ||
!absl::SimpleAtoi(pointer_component, &array_index)) {
return absl::FailedPreconditionError(
tensorstore::StrCat("JSON Pointer ", quoted_pointer(),
" is invalid for array value"));
}
if (array_index >= j_array->size()) {
if (mode == kDelete) return nullptr;
return absl::OutOfRangeError(tensorstore::StrCat(
"JSON Pointer ", quoted_pointer(),
" is out-of-range for array of size ", j_array->size()));
}
if (mode == kDelete && is_leaf) {
j_array->erase(j_array->begin() + array_index);
return nullptr;
}
sub_value = &(*j_array)[array_index];
}
} else {
return absl::FailedPreconditionError(tensorstore::StrCat(
"JSON Pointer reference ", quoted_pointer(), " cannot be applied to ",
sub_value->type_name(), " value: ", *sub_value));
}
if (pointer_component_end == std::string_view::npos) {
assert(mode != kDelete);
return sub_value;
}
i += pointer_component.size() + 1;
}
}
Result<const ::nlohmann::json*> Dereference(const ::nlohmann::json& full_value,
std::string_view sub_value_pointer,
DereferenceMode mode) {
assert(mode == kMustExist || mode == kSimulateCreate);
return json_pointer::Dereference(const_cast<::nlohmann::json&>(full_value),
sub_value_pointer, mode);
}
absl::Status Replace(::nlohmann::json& full_value,
std::string_view sub_value_pointer,
::nlohmann::json new_sub_value) {
if (sub_value_pointer.empty()) {
full_value = std::move(new_sub_value);
return absl::OkStatus();
}
if (!new_sub_value.is_discarded()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto* sub_value,
json_pointer::Dereference(full_value, sub_value_pointer, kCreate));
*sub_value = std::move(new_sub_value);
return absl::OkStatus();
}
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(full_value, sub_value_pointer, kDelete));
return absl::OkStatus();
}
}
} | #include "tensorstore/internal/json_pointer.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::json_pointer::Compare;
using ::tensorstore::json_pointer::CompareResult;
using ::tensorstore::json_pointer::Dereference;
using ::tensorstore::json_pointer::EncodeReferenceToken;
using ::tensorstore::json_pointer::kCreate;
using ::tensorstore::json_pointer::kDelete;
using ::tensorstore::json_pointer::kMustExist;
using ::tensorstore::json_pointer::kSimulateCreate;
using ::tensorstore::json_pointer::Replace;
using ::tensorstore::json_pointer::Validate;
using ::testing::Optional;
using ::testing::Pointee;
TEST(ValidateTest, Valid) {
TENSORSTORE_EXPECT_OK(Validate(""));
TENSORSTORE_EXPECT_OK(Validate("/"));
TENSORSTORE_EXPECT_OK(Validate("/a/"));
TENSORSTORE_EXPECT_OK(Validate("/abc"));
TENSORSTORE_EXPECT_OK(Validate("/abc/"));
TENSORSTORE_EXPECT_OK(Validate("/abc/def"));
TENSORSTORE_EXPECT_OK(Validate("/abc/def/xy~0/~1"));
}
TEST(ValidateTest, Invalid) {
EXPECT_THAT(Validate("foo"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"JSON Pointer does not start with '/': \"foo\""));
EXPECT_THAT(
Validate("/~~"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~~\""));
EXPECT_THAT(
Validate("/~"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~\""));
EXPECT_THAT(
Validate(std::string_view("/~0", 2)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"JSON Pointer requires '~' to be followed by '0' or '1': \"/~\""));
}
TEST(CompareTest, Basic) {
EXPECT_EQ(Compare("", ""), CompareResult::kEqual);
EXPECT_EQ(Compare("", "/foo"), CompareResult::kContains);
EXPECT_EQ(Compare("/foo", ""), CompareResult::kContainedIn);
EXPECT_EQ(Compare("/a", "/b"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a", "/ab"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a/b", "/acc"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/acc", "/a/b"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a*c", "/a/b"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/ab", "/a"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~0", "/a~1"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~1", "/a~0"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/a~0", "/ax"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/a~1", "/ax"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/ax", "/a~0"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/ax", "/a~1"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/xx", "/xx/abc"), CompareResult::kContains);
EXPECT_EQ(Compare("/xx/abc", "/xx"), CompareResult::kContainedIn);
EXPECT_EQ(Compare("/abc", "/acc"), CompareResult::kLessThan);
EXPECT_EQ(Compare("/b", "/a"), CompareResult::kGreaterThan);
EXPECT_EQ(Compare("/ba", "/ab"), CompareResult::kGreaterThan);
}
TEST(EncodeReferenceTokenTest, Basic) {
EXPECT_EQ("", EncodeReferenceToken(""));
EXPECT_EQ("abc", EncodeReferenceToken("abc"));
EXPECT_EQ("abc~0", EncodeReferenceToken("abc~"));
EXPECT_EQ("abc~1", EncodeReferenceToken("abc/"));
EXPECT_EQ("abc~1~0xyz", EncodeReferenceToken("abc/~xyz"));
}
TEST(DereferenceTest, ExamplesFromRfc6901) {
::nlohmann::json document = {
{"foo", {"bar", "baz"}},
{"", 0},
{"a/b", 1},
{"c%d", 2},
{"e^f", 3},
{"g|h", 4},
{"i\\j", 5},
{"k\"l", 6},
{" ", 7},
{"m~n", 8},
};
EXPECT_THAT(Dereference(document, "", kMustExist), Optional(&document));
EXPECT_THAT(Dereference(document, "/foo", kMustExist),
Optional(Pointee(::nlohmann::json{"bar", "baz"})));
EXPECT_THAT(Dereference(document, "/foo/0", kMustExist),
Optional(Pointee(::nlohmann::json("bar"))));
EXPECT_THAT(Dereference(document, "/", kMustExist), Optional(Pointee(0)));
EXPECT_THAT(Dereference(document, "/a~1b", kMustExist), Optional(Pointee(1)));
EXPECT_THAT(Dereference(document, "/c%d", kMustExist), Optional(Pointee(2)));
EXPECT_THAT(Dereference(document, "/e^f", kMustExist), Optional(Pointee(3)));
EXPECT_THAT(Dereference(document, "/g|h", kMustExist), Optional(Pointee(4)));
EXPECT_THAT(Dereference(document, "/i\\j", kMustExist), Optional(Pointee(5)));
EXPECT_THAT(Dereference(document, "/k\"l", kMustExist), Optional(Pointee(6)));
EXPECT_THAT(Dereference(document, "/ ", kMustExist), Optional(Pointee(7)));
EXPECT_THAT(Dereference(document, "/m~0n", kMustExist), Optional(Pointee(8)));
}
TEST(DereferenceTest, ConstAccess) {
EXPECT_THAT(Dereference(true, "", kMustExist), Optional(Pointee(true)));
EXPECT_THAT(Dereference(true, "/", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/\" cannot be applied to "
"boolean value: true"));
EXPECT_THAT(
Dereference(true, "/a/b/c", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied to "
"boolean value: true"));
EXPECT_THAT(Dereference({1, 2, 3}, "/0", kMustExist), Optional(Pointee(1)));
EXPECT_THAT(Dereference({1, 2, 3}, "/1", kMustExist), Optional(Pointee(2)));
EXPECT_THAT(
Dereference({1, 2, 3}, "/3", kMustExist),
MatchesStatus(absl::StatusCode::kOutOfRange,
"JSON Pointer \"/3\" is out-of-range for array of size 3"));
EXPECT_THAT(Dereference({1, 2, 3}, "/a", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/a\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/ 1", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/ 1\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/00", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/00\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/", kMustExist),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/\" is invalid for array value"));
EXPECT_THAT(Dereference({1, 2, 3}, "/-", kMustExist),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/-\" refers to non-existent array element"));
EXPECT_THAT(Dereference({1, {{"a", 7}, {"b", 8}}, 3}, "/1/a", kMustExist),
Optional(Pointee(7)));
EXPECT_THAT(
Dereference({1, {{"a", 7}, {"b", 8}}, 3}, "/1/c", kMustExist),
MatchesStatus(
absl::StatusCode::kNotFound,
"JSON Pointer \"/1/c\" refers to non-existent object member"));
EXPECT_THAT(
Dereference(::nlohmann::json::value_t::discarded, "/a/b", kMustExist),
MatchesStatus(absl::StatusCode::kNotFound, ""));
}
TEST(DereferenceTest, NonConstAccess) {
{
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Dereference(doc, "/-", kCreate));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, 2, 3, ::nlohmann::json::value_t::discarded}));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kCreate), Optional(&doc));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kMustExist),
MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kDelete), Optional(nullptr));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "/a", kDelete), Optional(nullptr));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
EXPECT_THAT(Dereference(doc, "", kSimulateCreate), Optional(&doc));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
{
::nlohmann::json doc(::nlohmann::json::value_t::discarded);
TENSORSTORE_EXPECT_OK(Dereference(doc, "/a/b/c", kCreate));
EXPECT_THAT(
doc,
MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", ::nlohmann::json::value_t::discarded}}}}}}));
}
{
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Dereference(doc, "/-/x", kCreate));
EXPECT_THAT(doc,
MatchesJson(::nlohmann::json{
1, 2, 3, {{"x", ::nlohmann::json::value_t::discarded}}}));
}
{
::nlohmann::json doc{1, 2, 3};
EXPECT_THAT(
Dereference(doc, "/-/a", kMustExist),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"JSON Pointer \"/-\" refers to non-existent array element"));
}
}
TEST(ReplaceTest, ReplaceEntireValue) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "", 42));
EXPECT_THAT(doc, MatchesJson(42));
}
TEST(ReplaceTest, DeleteEntireValue) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json::value_t::discarded));
}
TEST(ReplaceTest, ReplaceArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 42, 3}));
}
TEST(ReplaceTest, ReplaceNestedWithinArrayElement) {
::nlohmann::json doc{1, {{"a", 2}}, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1/a", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, {{"a", 42}}, 3}));
}
TEST(ReplaceTest, DeleteNestedWithinArrayElement) {
::nlohmann::json doc{1, {{"a", 2}}, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/1/a", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(
doc, MatchesJson(::nlohmann::json{1, ::nlohmann::json::object_t(), 3}));
}
TEST(ReplaceTest, AppendNestedMember) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/-/a/b/c", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, 2, 3, {{"a", {{"b", {{"c", 42}}}}}}}));
}
TEST(ReplaceTest, ReplaceNestedMember) {
::nlohmann::json doc{1, {{"d", false}}, 3};
TENSORSTORE_EXPECT_OK(Replace(doc, "/1/a/b/c", 42));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{
1, {{"a", {{"b", {{"c", 42}}}}}, {"d", false}}, 3}));
}
TEST(ReplaceTest, DeleteNestedMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/b/c", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc,
MatchesJson(::nlohmann::json{
{"a", {{"b", ::nlohmann::json::object_t()}}}, {"d", false}}));
}
TEST(ReplaceTest, DeleteMissingMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/e", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{{"a", {{"b", {{"c", 42}}}}},
{"d", false}}));
}
TEST(ReplaceTest, DeleteMissingNestedMember) {
::nlohmann::json doc{{"a", {{"b", {{"c", 42}}}}}, {"d", false}};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/a/e/f", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{{"a", {{"b", {{"c", 42}}}}},
{"d", false}}));
}
TEST(ReplaceTest, DeleteArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/1", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 3}));
}
TEST(ReplaceTest, DeleteNewArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/-", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 2, 3}));
}
TEST(ReplaceTest, DeleteOutOfRangeArrayElement) {
::nlohmann::json doc{1, 2, 3};
TENSORSTORE_EXPECT_OK(
Replace(doc, "/4", ::nlohmann::json::value_t::discarded));
EXPECT_THAT(doc, MatchesJson(::nlohmann::json{1, 2, 3}));
}
TEST(ReplaceTest, DeleteInvalidElement) {
::nlohmann::json doc(false);
EXPECT_THAT(Replace(doc, "/4", ::nlohmann::json::value_t::discarded),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/4\" cannot be applied "
"to boolean value: false"));
EXPECT_THAT(doc, MatchesJson(false));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_pointer.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_pointer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
fe174884-e168-4067-9989-54ac45037e21 | cpp | google/tensorstore | env | tensorstore/internal/env.cc | tensorstore/internal/env_test.cc | #include "tensorstore/internal/env.h"
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <processenv.h>
#endif
#include <stddef.h>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#ifndef _WIN32
extern char** environ;
#endif
namespace tensorstore {
namespace internal {
absl::flat_hash_map<std::string, std::string> GetEnvironmentMap() {
absl::flat_hash_map<std::string, std::string> result;
#if _WIN32
char* envblock = GetEnvironmentStrings();
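// The Windows environment block is a sequence of NUL-terminated
// "NAME=value" strings followed by a final empty string.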
for (auto p = envblock; *p; ) {
if (const char* eq = strchr(p, '=')) {
result[std::string(p, eq - p)] = eq + 1;
}
p += strlen(p) + 1;
}
FreeEnvironmentStrings(envblock);
#else
for (auto p = environ; *p; ++p) {
if (const char* eq = strchr(*p, '=')) {
result[std::string(*p, eq - *p)] = eq + 1;
}
}
#endif
return result;
}
std::optional<std::string> GetEnv(char const* variable) {
#if _WIN32
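// _dupenv_s allocates a copy of the value (null if the variable is unset);
// the unique_ptr below frees it.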
char* buffer;
size_t size;
_dupenv_s(&buffer, &size, variable);
std::unique_ptr<char, decltype(&free)> release(buffer, &free);
#else
char* buffer = std::getenv(variable);
#endif
if (buffer == nullptr) {
return std::optional<std::string>();
}
return std::optional<std::string>(std::string{buffer});
}
void SetEnv(const char* variable, const char* value) {
#if _WIN32
::_putenv_s(variable, value);
#else
::setenv(variable, value, 1);
#endif
}
void UnsetEnv(const char* variable) {
#if _WIN32
::_putenv_s(variable, "");
#else
::unsetenv(variable);
#endif
}
}
} | #include "tensorstore/internal/env.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::GetEnvironmentMap;
using ::tensorstore::internal::GetEnvValue;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
TEST(GetEnvTest, Basic) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
{
auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
EXPECT_TRUE(var);
EXPECT_EQ("test env var", *var);
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnv("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
TEST(GetEnvTest, GetEnvironmentMap) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "test env var");
auto allenv = GetEnvironmentMap();
EXPECT_FALSE(allenv.empty());
EXPECT_THAT(allenv.count("TENSORSTORE_TEST_ENV_VAR"), 1);
}
TEST(GetEnvTest, ParseBool) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "trUe");
{
EXPECT_THAT(GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR"),
testing::Optional(true));
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnvValue<bool>("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
TEST(GetEnvTest, ParseInt) {
SetEnv("TENSORSTORE_TEST_ENV_VAR", "123");
{
EXPECT_THAT(GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR"),
testing::Optional(123));
}
UnsetEnv("TENSORSTORE_TEST_ENV_VAR");
{
auto var = GetEnvValue<int>("TENSORSTORE_TEST_ENV_VAR");
EXPECT_FALSE(var);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/env.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/env_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e55f37b6-c6bc-4718-9183-d7c4c498be27 | cpp | google/tensorstore | prometheus | tensorstore/internal/metrics/prometheus.cc | tensorstore/internal/metrics/prometheus_test.cc | #include "tensorstore/internal/metrics/prometheus.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_metrics {
namespace {
static inline constexpr internal::AsciiSet kDigit{"0123456789"};
static inline constexpr internal::AsciiSet kMetricFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_:"};
static inline constexpr internal::AsciiSet kLabelFirst{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_"};
static inline constexpr internal::AsciiSet kValueUnreserved{
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789"
"-_.~()"};
bool IsLegalPrometheusLabel(std::string_view label) {
if (label.empty() || !kLabelFirst.Test(label[0])) return false;
for (char c : label) {
if (!kLabelFirst.Test(c) && !kDigit.Test(c)) return false;
}
return true;
}
absl::Status AppendLabelValue(std::string* url, std::string_view label,
std::string_view value) {
if (!IsLegalPrometheusLabel(label)) {
return absl::InvalidArgumentError("");
}
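// Pushgateway URL label syntax: an empty value is encoded as
// "<label>@base64/=", and a value containing reserved characters is encoded
// as "<label>@base64/<web-safe base64>".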
if (value.empty()) {
absl::StrAppend(url, "/", label, "@base64/=");
return absl::OkStatus();
}
for (char c : value) {
if (!kValueUnreserved.Test(c)) {
absl::StrAppend(url, "/", label, "@base64/",
absl::WebSafeBase64Escape(value));
return absl::OkStatus();
}
}
absl::StrAppend(url, "/", label, "/", value);
return absl::OkStatus();
}
std::string AsPrometheusString(std::string_view in, internal::AsciiSet first) {
while (!in.empty() && !first.Test(in[0])) {
in = in.substr(1);
}
while (!in.empty() && !first.Test(in[in.size() - 1]) &&
!kDigit.Test(in[in.size() - 1])) {
in = in.substr(0, in.size() - 1);
}
std::string raw(in);
for (char& c : raw) {
if (!first.Test(c) && !kDigit.Test(c)) c = '_';
}
return raw;
}
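// Formats a single exposition line: <name><suffix>{<labels>} <value>.
// String and monostate values produce an empty line, which callers skip.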
struct PrometheusValueLine {
const std::string& metric_name;
const char* suffix;
const std::string& label_str;
std::string operator()(int64_t x) {
return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
label_str, label_str.empty() ? "" : "} ", x);
}
std::string operator()(double x) {
return absl::StrCat(metric_name, suffix, label_str.empty() ? "" : "{",
label_str, label_str.empty() ? "" : "} ", x);
}
std::string operator()(const std::string& x) { return {}; }
std::string operator()(std::monostate) { return {}; }
};
}
Result<internal_http::HttpRequest> BuildPrometheusPushRequest(
const PushGatewayConfig& config) {
if (config.job.empty()) {
return absl::InvalidArgumentError("PushGatewayConfig bad job");
}
if (!absl::StartsWith(config.host, "http:
!absl::StartsWith(config.host, "https:
return absl::InvalidArgumentError("PushGatewayConfig bad host");
}
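// The push URL has the form
// <host>/metrics/job/<job>[/instance/<instance>][/<label>/<value>...].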
std::string url = config.host;
if (!absl::EndsWith(url, "/")) {
absl::StrAppend(&url, "/metrics");
} else {
absl::StrAppend(&url, "metrics");
}
TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, "job", config.job));
if (!config.instance.empty()) {
TENSORSTORE_RETURN_IF_ERROR(
AppendLabelValue(&url, "instance", config.instance));
}
for (const auto& [k, v] : config.additional_labels) {
if (absl::EqualsIgnoreCase("job", k) ||
absl::EqualsIgnoreCase("instance", k)) {
return absl::InvalidArgumentError(
"PushGatewayConfig additional_labels cannot contain job or instance");
}
TENSORSTORE_RETURN_IF_ERROR(AppendLabelValue(&url, k, v));
}
return internal_http::HttpRequestBuilder("PUT", std::move(url))
.BuildRequest();
}
void PrometheusExpositionFormat(
const CollectedMetric& metric,
absl::FunctionRef<void(std::string)> handle_line) {
std::string metric_name =
AsPrometheusString(metric.metric_name, kMetricFirst);
if (metric_name.empty()) return;
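// A metric must carry either scalar values or histograms, but not both;
// otherwise nothing is emitted.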
if ((metric.values.empty() && metric.histograms.empty()) ||
(!metric.values.empty() && !metric.histograms.empty())) {
return;
}
if (!metric.metadata.description.empty()) {
handle_line(
absl::StrCat("# HELP ", metric_name, " ", metric.metadata.description));
}
if (auto units_str = UnitsToString(metric.metadata.units);
!units_str.empty()) {
handle_line(absl::StrCat("# UNIT ", metric_name, " ", units_str));
}
std::vector<std::string> prometheus_fields;
prometheus_fields.reserve(metric.field_names.size());
for (size_t i = 0; i < metric.field_names.size(); ++i) {
prometheus_fields.push_back(
AsPrometheusString(metric.field_names[i], kLabelFirst));
}
auto build_label_str = [&](auto& v) -> std::string {
assert(metric.field_names.size() == v.fields.size());
if (v.fields.empty()) return {};
std::string label_str;
for (size_t i = 0; i < metric.field_names.size(); ++i) {
absl::StrAppend(&label_str, i == 0 ? "" : ", ", prometheus_fields[i],
"=\"", absl::CEscape(v.fields[i]), "\"");
}
return label_str;
};
if (!metric.values.empty()) {
std::string line;
for (const auto& v : metric.values) {
std::string label_str = build_label_str(v);
line =
std::visit(PrometheusValueLine{metric_name, " ", label_str}, v.value);
if (!line.empty()) {
handle_line(std::move(line));
}
line = std::visit(PrometheusValueLine{metric_name, "_max ", label_str},
v.max_value);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
if (!metric.histograms.empty()) {
handle_line(absl::StrCat("# TYPE ", metric_name, " histogram"));
std::string line;
for (const auto& v : metric.histograms) {
std::string label_str = build_label_str(v);
struct Histogram {
std::vector<int64_t> buckets;
};
line = PrometheusValueLine{metric_name, "_mean ", label_str}(v.mean);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_count ", label_str}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_variance ",
label_str}(v.sum_of_squared_deviation);
if (!line.empty()) {
handle_line(std::move(line));
}
line = PrometheusValueLine{metric_name, "_sum ",
label_str}(v.mean * v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
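// Trailing zero buckets are trimmed. Note that the "le" label emitted here
// is the bucket index, not a cumulative upper bound as in standard
// Prometheus histograms.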
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) --end;
for (size_t i = 0; i < end; i++) {
std::string bucket_labels = absl::StrCat(
label_str, label_str.empty() ? "" : ", ", "le=\"", i, "\"");
line = PrometheusValueLine{metric_name, "_bucket ",
bucket_labels}(v.buckets[i]);
if (!line.empty()) {
handle_line(std::move(line));
}
}
std::string bucket_labels =
absl::StrCat(label_str, label_str.empty() ? "" : ", ", "le=\"+Inf\"");
line =
PrometheusValueLine{metric_name, "_bucket ", bucket_labels}(v.count);
if (!line.empty()) {
handle_line(std::move(line));
}
}
}
}
}
} | #include "tensorstore/internal/metrics/prometheus.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
#include "tensorstore/internal/metrics/metadata.h"
namespace {
using ::tensorstore::internal_metrics::BuildPrometheusPushRequest;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::PrometheusExpositionFormat;
using ::tensorstore::internal_metrics::PushGatewayConfig;
using ::tensorstore::internal_metrics::Units;
TEST(PrometheusTest, BuildPrometheusPushRequest) {
auto request = BuildPrometheusPushRequest(
PushGatewayConfig{"http:
EXPECT_TRUE(request.has_value());
EXPECT_EQ("http:
request->url);
}
TEST(PrometheusTest, PrometheusExpositionFormat) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::string> lines;
PrometheusExpositionFormat(
metric, [&](std::string line) { lines.push_back(std::move(line)); });
return lines;
};
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.metadata.units = Units::kBytes;
metric.tag = "tag";
EXPECT_THAT(format_lines(metric), ::testing::IsEmpty());
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
v.max_value = int64_t{2};
EXPECT_THAT(format_lines(metric),
::testing::ElementsAre("# HELP metric_name description",
"# UNIT metric_name bytes",
"metric_name {field_name=\"vv\"} 1",
"metric_name_max {field_name=\"vv\"} 2"));
}
TEST(PrometheusTest, PrometheusExpositionFormat_Histogram) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::string> lines;
PrometheusExpositionFormat(
metric, [&](std::string line) { lines.push_back(std::move(line)); });
return lines;
};
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.metadata.units = Units::kBytes;
metric.tag = "tag";
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
h.mean = 1;
h.sum_of_squared_deviation = 1;
h.buckets.push_back(0);
h.buckets.push_back(1);
EXPECT_THAT(format_lines(metric),
::testing::ElementsAre(
"# HELP metric_name description",
"# UNIT metric_name bytes",
"# TYPE metric_name histogram",
"metric_name_mean {field_name=\"hh\"} 1",
"metric_name_count {field_name=\"hh\"} 1",
"metric_name_variance {field_name=\"hh\"} 1",
"metric_name_sum {field_name=\"hh\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"0\"} 0",
"metric_name_bucket {field_name=\"hh\", le=\"1\"} 1",
"metric_name_bucket {field_name=\"hh\", le=\"+Inf\"} 1"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/prometheus.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/prometheus_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
51d9da99-2cb3-4a12-bed2-e9b5bb35d3ea | cpp | google/tensorstore | collect | tensorstore/internal/metrics/collect.cc | tensorstore/internal/metrics/collect_test.cc | #include "tensorstore/internal/metrics/collect.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include <nlohmann/json.hpp>
namespace tensorstore {
namespace internal_metrics {
namespace {
struct IsNonZero {
bool operator()(int64_t x) { return x != 0; }
bool operator()(double x) { return x != 0; }
bool operator()(const std::string& x) { return !x.empty(); }
bool operator()(std::monostate) { return false; }
};
struct VisitStrAppend {
std::string* line;
const char* before;
const char* after;
void operator()(int64_t x) { absl::StrAppend(line, before, x, after); }
void operator()(double x) { absl::StrAppend(line, before, x, after); }
void operator()(const std::string& x) {
absl::StrAppend(line, before, x, after);
}
void operator()(std::monostate) {}
};
struct VisitJsonDictify {
::nlohmann::json::object_t& dest;
const char* key;
void operator()(int64_t x) { dest[key] = x; }
void operator()(double x) { dest[key] = x; }
void operator()(const std::string& x) { dest[key] = x; }
void operator()(std::monostate) {}
};
}
bool IsCollectedMetricNonZero(const CollectedMetric& metric) {
if (!metric.values.empty()) {
for (const auto& v : metric.values) {
if (std::visit(IsNonZero{}, v.value)) return true;
if (std::visit(IsNonZero{}, v.max_value)) return true;
}
}
if (!metric.histograms.empty()) {
for (const auto& v : metric.histograms) {
if (v.count != 0) return true;
}
}
return false;
}
void FormatCollectedMetric(
const CollectedMetric& metric,
absl::FunctionRef<void(bool has_value, std::string formatted_line)>
handle_line) {
std::string field_names;
if (!metric.field_names.empty()) {
field_names = absl::StrJoin(metric.field_names, ", ");
}
auto metric_name_with_fields = [&](auto& v) -> std::string {
if (v.fields.empty()) return std::string(metric.metric_name);
return absl::StrCat(metric.metric_name, "<", field_names, ">[",
absl::StrJoin(v.fields, ", "), "]");
};
if (!metric.values.empty()) {
for (auto& v : metric.values) {
bool has_value = false;
std::string line = metric_name_with_fields(v);
if (std::holds_alternative<std::monostate>(v.max_value) &&
std::holds_alternative<std::monostate>(v.value)) {
} else {
has_value |= std::visit(IsNonZero{}, v.value);
has_value |= std::visit(IsNonZero{}, v.max_value);
if (std::holds_alternative<std::monostate>(v.max_value)) {
std::visit(VisitStrAppend{&line, "=", ""}, v.value);
} else if (std::holds_alternative<std::monostate>(v.value)) {
std::visit(VisitStrAppend{&line, "=", ""}, v.max_value);
} else {
std::visit(VisitStrAppend{&line, "={value=", ""}, v.value);
std::visit(VisitStrAppend{&line, ", max=", "}"}, v.max_value);
}
}
handle_line(has_value, std::move(line));
}
}
if (!metric.histograms.empty()) {
for (auto& v : metric.histograms) {
std::string line = metric_name_with_fields(v);
absl::StrAppend(&line, "={count=", v.count, " mean=", v.mean,
" buckets=[");
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) end--;
auto it = v.buckets.begin();
if (end > 0) {
absl::StrAppend(&line, *it);
}
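// Append the remaining buckets in runs of up to ten values per StrJoin call.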
for (size_t i = 1; i < end;) {
size_t j = std::min(i + 10, end);
absl::StrAppend(&line, ", ");
absl::StrAppend(&line, absl::StrJoin(it + i, it + j, ","));
i = j;
}
absl::StrAppend(&line, "]}");
handle_line(v.count, std::move(line));
}
}
}
::nlohmann::json CollectedMetricToJson(const CollectedMetric& metric) {
::nlohmann::json::object_t result;
result["name"] = metric.metric_name;
auto set_field_keys = [&](auto& v, ::nlohmann::json::object_t& h) {
assert(metric.field_names.size() == v.fields.size());
for (size_t i = 0; i < metric.field_names.size(); ++i) {
if (metric.field_names[i] == "value" ||
metric.field_names[i] == "count" ||
metric.field_names[i] == "max_value" ||
metric.field_names[i] == "sum") {
h[absl::StrCat("_", metric.field_names[i])] = v.fields[i];
} else {
h[std::string(metric.field_names[i])] = v.fields[i];
}
}
};
std::vector<::nlohmann::json> values;
if (!metric.values.empty()) {
for (const auto& v : metric.values) {
::nlohmann::json::object_t tmp{};
set_field_keys(v, tmp);
std::visit(VisitJsonDictify{tmp, "value"}, v.value);
std::visit(VisitJsonDictify{tmp, "max_value"}, v.max_value);
values.push_back(std::move(tmp));
}
}
if (!metric.histograms.empty()) {
for (const auto& v : metric.histograms) {
::nlohmann::json::object_t tmp{};
set_field_keys(v, tmp);
tmp["count"] = v.count;
tmp["mean"] = v.mean;
tmp["sum_of_squared_deviation"] = v.sum_of_squared_deviation;
size_t end = v.buckets.size();
while (end > 0 && v.buckets[end - 1] == 0) end--;
auto it = v.buckets.begin();
for (size_t i = 0; i < end; ++i) {
tmp[absl::StrCat(i)] = *it++;
}
values.push_back(std::move(tmp));
}
}
result["values"] = std::move(values);
return result;
}
}
} | #include "tensorstore/internal/metrics/collect.h"
#include <stdint.h>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::CollectedMetricToJson;
using ::tensorstore::internal_metrics::FormatCollectedMetric;
using ::tensorstore::internal_metrics::IsCollectedMetricNonZero;
using ::testing::ElementsAre;
using ::testing::Pair;
TEST(CollectTest, IsCollectedMetricNonZero) {
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
EXPECT_FALSE(IsCollectedMetricNonZero(metric));
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 0;
EXPECT_FALSE(IsCollectedMetricNonZero(metric));
h.count = 1;
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
metric.histograms.clear();
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
v.value = std::monostate{};
v.max_value = int64_t{1};
EXPECT_TRUE(IsCollectedMetricNonZero(metric));
v.max_value = std::monostate{};
}
TEST(CollectTest, FormatCollectedMetric) {
auto format_lines = [](const CollectedMetric& metric) {
std::vector<std::pair<bool, std::string>> lines;
FormatCollectedMetric(
metric, [&](bool has_value, std::string formatted_line) {
lines.push_back(std::make_pair(has_value, std::move(formatted_line)));
});
return lines;
};
EXPECT_THAT(format_lines({}), testing::IsEmpty());
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
{
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_THAT(format_lines(metric),
ElementsAre(Pair(true, "metric_name<field_name>[vv]=1")));
}
{
metric.values.clear();
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
EXPECT_THAT(format_lines(metric),
ElementsAre(Pair(true,
"metric_name<field_name>[hh]={count=1 "
"mean=0 buckets=[]}")));
}
}
TEST(CollectTest, CollectedMetricToJson) {
EXPECT_THAT(
CollectedMetricToJson({}),
MatchesJson({{"name", ""}, {"values", nlohmann::json::array_t()}}));
CollectedMetric metric;
metric.metric_name = "metric_name";
metric.field_names.push_back("field_name");
metric.metadata.description = "description";
metric.tag = "tag";
{
metric.values.push_back(CollectedMetric::Value{});
auto& v = metric.values.back();
v.fields.push_back("vv");
v.value = int64_t{1};
EXPECT_THAT(CollectedMetricToJson(metric),
MatchesJson({{"name", "metric_name"},
{"values",
{{
{"value", 1},
{"field_name", "vv"},
}}}}));
}
{
metric.values.clear();
metric.histograms.push_back(CollectedMetric::Histogram{});
auto& h = metric.histograms.back();
h.fields.push_back("hh");
h.count = 1;
EXPECT_THAT(CollectedMetricToJson(metric),
MatchesJson({{"name", "metric_name"},
{"values",
{{
{"count", 1},
{"field_name", "hh"},
{"mean", 0.0},
{"sum_of_squared_deviation", 0.0},
}}}}));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/collect.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/collect_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
cd6f7740-592e-4590-b580-e12b3fe549c5 | cpp | google/tensorstore | async_cache | tensorstore/internal/cache/async_cache.cc | tensorstore/internal/cache/async_cache_test.cc | #include "tensorstore/internal/cache/async_cache.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <functional>
#include <mutex>
#include <type_traits>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/batch_impl.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/compare.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace {
using Entry = AsyncCache::Entry;
using ReadState = AsyncCache::ReadState;
using TransactionNode = AsyncCache::TransactionNode;
using TransactionTree = AsyncCache::Entry::TransactionTree;
using PendingWritebackQueueAccessor =
TransactionNode::PendingWritebackQueueAccessor;
using PrepareForCommitState = TransactionNode::PrepareForCommitState;
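// Smallest time increment; used to bump the staleness bound just past a
// cached timestamp when the cached value is known to be stale.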
constexpr absl::Duration kEpsilonDuration = absl::Nanoseconds(1);
void AcquireReadRequestReference(Entry& entry) {
internal::PinnedCacheEntry<AsyncCache>(&entry).release();
}
void ReleaseReadRequestReference(Entry& entry) {
internal::PinnedCacheEntry<AsyncCache>(&entry, internal::adopt_object_ref);
}
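// For reads issued on behalf of a transaction node, a commit block is also
// held so the transaction cannot start committing while the read is in
// flight.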
void AcquireReadRequestReference(TransactionNode& node) {
if (!node.transaction()->commit_started()) {
node.transaction()->AcquireCommitBlock();
}
intrusive_ptr_increment(&node);
}
void ReleaseReadRequestReference(TransactionNode& node) {
if (!node.transaction()->commit_started()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "Releasing commit block";
node.transaction()->ReleaseCommitBlock();
}
intrusive_ptr_decrement(&node);
}
Future<const void> GetFuture(Promise<void>& promise) {
if (!promise.null()) {
auto future = promise.future();
if (!future.null()) return future;
}
auto pair = PromiseFuturePair<void>::Make();
promise = std::move(pair.promise);
return std::move(pair.future);
}
const AsyncCache::ReadRequestState& GetEffectiveReadRequestState(Entry& entry) {
return entry.read_request_state_;
}
const AsyncCache::ReadRequestState& GetEffectiveReadRequestState(
TransactionNode& node) {
return node.reads_committed_ ? GetOwningEntry(node).read_request_state_
: node.read_request_state_;
}
template <typename EntryOrNode>
void EntryOrNodeStartRead(EntryOrNode& entry_or_node,
UniqueWriterLock<Entry> lock, Batch::View batch) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& request_state = entry_or_node.read_request_state_;
if (request_state.queued_request_is_deferred) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node << "EntryOrNodeStartRead: no pending read request";
return;
}
if (!request_state.queued.result_needed()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node
<< "EntryOrNodeStartRead: pending read request was cancelled";
request_state.queued = Promise<void>();
request_state.queued_request_is_deferred = true;
request_state.queued_time = absl::InfinitePast();
return;
}
assert(request_state.issued.null());
auto staleness_bound = request_state.issued_time =
std::exchange(request_state.queued_time, absl::InfinitePast());
request_state.issued = std::move(request_state.queued);
request_state.queued_request_is_deferred = true;
lock.unlock();
AcquireReadRequestReference(entry_or_node);
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< entry_or_node << "EntryOrNodeStartRead: calling DoRead";
AsyncCache::AsyncCacheReadRequest read_request;
read_request.staleness_bound = staleness_bound;
read_request.batch = batch;
entry_or_node.DoRead(std::move(read_request));
}
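// Advances the entry's writeback state machine: invokes PrepareDone and
// ReadyForCommit on queued transaction nodes as allowed, and otherwise
// starts any queued read.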
void MaybeStartReadOrWriteback(Entry& entry, UniqueWriterLock<Entry> lock,
Batch::View read_batch) {
auto& read_request_state = entry.read_request_state_;
if (TransactionNode* committing_transaction_node =
entry.committing_transaction_node_) {
TransactionNode* next;
while (true) {
const auto existing_prepare_for_commit_state =
committing_transaction_node->prepare_for_commit_state_.load(
std::memory_order_relaxed);
const bool read_request_issued = !read_request_state.issued.null();
PrepareForCommitState new_prepare_for_commit_state;
switch (existing_prepare_for_commit_state) {
case PrepareForCommitState::kNone:
case PrepareForCommitState::kPrepareDoneCalled:
new_prepare_for_commit_state =
PrepareForCommitState::kPrepareDoneCalled;
if (read_request_issued) break;
[[fallthrough]];
case PrepareForCommitState::kReadyForCommitCalled:
new_prepare_for_commit_state =
PrepareForCommitState::kReadyForCommitCalled;
}
committing_transaction_node->prepare_for_commit_state_ =
new_prepare_for_commit_state;
next =
PendingWritebackQueueAccessor::GetNext(committing_transaction_node);
if (next == committing_transaction_node ||
next->transaction() != committing_transaction_node->transaction() ||
next->prepare_for_commit_state_.load(std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled) {
next = nullptr;
}
lock.unlock();
switch (existing_prepare_for_commit_state) {
case PrepareForCommitState::kNone:
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *committing_transaction_node << "PrepareDone";
committing_transaction_node->PrepareDone();
[[fallthrough]];
case PrepareForCommitState::kPrepareDoneCalled:
if (read_request_issued) return;
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *committing_transaction_node << "ReadyForCommit";
committing_transaction_node->ReadyForCommit();
break;
case PrepareForCommitState::kReadyForCommitCalled:
break;
}
if (!next) return;
committing_transaction_node = next;
lock = UniqueWriterLock<Entry>(entry);
}
}
if (read_request_state.issued.null()) {
EntryOrNodeStartRead(entry, std::move(lock), read_batch);
}
}
void MaybeIssueRead(Entry& entry, UniqueWriterLock<Entry> lock,
Batch::View batch) {
MaybeStartReadOrWriteback(entry, std::move(lock), batch);
}
void MaybeIssueRead(TransactionNode& node, UniqueWriterLock<Entry> lock,
Batch::View batch) {
if (!node.read_request_state_.issued.null()) return;
EntryOrNodeStartRead(node, std::move(lock), batch);
}
template <typename EntryOrNode>
void SetReadState(EntryOrNode& entry_or_node, ReadState&& read_state,
size_t read_state_size) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
if constexpr (std::is_same_v<EntryOrNode, TransactionNode>) {
if (entry_or_node.reads_committed_) {
assert(entry_or_node.prepare_for_commit_state_.load(
std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled);
SetReadState(GetOwningEntry(entry_or_node), std::move(read_state),
read_state_size);
return;
}
}
entry_or_node.read_request_state_.known_to_be_stale = false;
entry_or_node.read_request_state_.read_state = std::move(read_state);
size_t change =
read_state_size -
std::exchange(entry_or_node.read_request_state_.read_state_size,
read_state_size);
if (change != 0) {
if constexpr (std::is_same_v<EntryOrNode, TransactionNode>) {
entry_or_node.UpdateSizeInBytes(change);
} else {
entry_or_node.NotifySizeChanged();
}
}
}
template <typename EntryOrNode>
class AsyncCacheBatchEntry : public Batch::Impl::Entry {
public:
using EntryOrNodePtr =
std::conditional_t<std::is_same_v<EntryOrNode, AsyncCache::Entry>,
PinnedCacheEntry<AsyncCache>,
OpenTransactionNodePtr<AsyncCache::TransactionNode>>;
using KeyParam = internal_future::FutureStateBase*;
explicit AsyncCacheBatchEntry(size_t nesting_depth,
EntryOrNode& entry_or_node,
Promise<void> promise)
: Batch::Impl::Entry(nesting_depth),
entry_or_node_(&entry_or_node),
promise_(std::move(promise)) {}
KeyParam key() const { return &internal_future::FutureAccess::rep(promise_); }
private:
void Submit(Batch::View batch) override {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *entry_or_node_ << "Submitting batch read";
auto& entry = GetOwningEntry(*entry_or_node_);
UniqueWriterLock lock(entry);
auto& read_request_state = entry_or_node_->read_request_state_;
if (!HaveSameSharedState(read_request_state.queued, promise_)) {
return;
}
read_request_state.queued_request_is_deferred = false;
MaybeIssueRead(*entry_or_node_, std::move(lock), batch);
delete this;
}
EntryOrNodePtr entry_or_node_;
Promise<void> promise_;
};
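// Issues or coalesces a read request on an entry or transaction node,
// returning a future that resolves once a read no older than
// options.staleness_bound has completed.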
template <typename EntryOrNode>
Future<const void> RequestRead(EntryOrNode& entry_or_node,
AsyncCache::AsyncCacheReadRequest options,
bool must_not_be_known_to_be_stale) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& entry = GetOwningEntry(entry_or_node);
UniqueWriterLock lock(entry);
auto& effective_request_state = GetEffectiveReadRequestState(entry_or_node);
const auto existing_time = effective_request_state.read_state.stamp.time;
if (existing_time != absl::InfinitePast() &&
existing_time >= options.staleness_bound) {
if (must_not_be_known_to_be_stale &&
effective_request_state.known_to_be_stale) {
options.staleness_bound = existing_time + kEpsilonDuration;
} else {
return MakeReadyFuture();
}
}
auto& request_state = entry_or_node.read_request_state_;
request_state.queued_time =
std::max(request_state.queued_time,
std::min(options.staleness_bound, absl::Now()));
if (!request_state.issued.null() &&
request_state.issued_time >= options.staleness_bound) {
return GetFuture(request_state.issued);
}
auto future = GetFuture(request_state.queued);
if (options.batch.deferred() && request_state.queued_request_is_deferred) {
using BatchE = AsyncCacheBatchEntry<EntryOrNode>;
auto& promise = request_state.queued;
Batch::Impl::From(options.batch)
->GetEntry<BatchE>(&internal_future::FutureAccess::rep(promise), [&] {
return std::make_unique<BatchE>(
GetOwningCache(entry).BatchNestingDepth(), entry_or_node,
promise);
});
} else {
request_state.queued_request_is_deferred = false;
}
MaybeIssueRead(entry_or_node, std::move(lock), options.batch);
return future;
}
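// Takes ownership of the queued read promise if the completed read
// satisfies it; the promise is resolved in the destructor, after the entry
// lock has been released.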
class QueuedReadHandler {
public:
explicit QueuedReadHandler(AsyncCache::ReadRequestState& request_state,
absl::Time time) {
if (!request_state.queued.null() && time >= request_state.queued_time) {
queued_ = std::move(request_state.queued);
request_state.queued_time = absl::InfinitePast();
request_state.queued_request_is_deferred = true;
}
}
~QueuedReadHandler() {
if (!queued_.null()) {
queued_.SetResult(tensorstore::MakeResult());
}
}
private:
Promise<void> queued_;
};
template <typename EntryOrNode>
void ResolveIssuedRead(EntryOrNode& entry_or_node, absl::Status status,
UniqueWriterLock<Entry> lock) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
auto& request_state = entry_or_node.read_request_state_;
auto issued = std::move(request_state.issued);
auto time = GetEffectiveReadRequestState(entry_or_node).read_state.stamp.time;
assert(!issued.null());
assert(!status.ok() || time >= request_state.issued_time);
{
QueuedReadHandler queued_read_handler(request_state, time);
MaybeIssueRead(entry_or_node, std::move(lock), {});
issued.SetResult(tensorstore::MakeResult(status));
}
ReleaseReadRequestReference(entry_or_node);
}
size_t GetReadStateSize(Entry& entry, const void* read_data) {
if (!read_data) return 0;
return entry.ComputeReadDataSizeInBytes(read_data);
}
template <typename EntryOrNode>
void EntryOrNodeReadSuccess(EntryOrNode& entry_or_node,
ReadState&& read_state) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
Entry& entry = GetOwningEntry(entry_or_node);
const size_t read_state_size = GetReadStateSize(entry, read_state.data.get());
UniqueWriterLock lock(entry);
assert(read_state.stamp.time != absl::InfinitePast());
assert(!StorageGeneration::IsUnknown(read_state.stamp.generation));
SetReadState(entry_or_node, std::move(read_state), read_state_size);
ResolveIssuedRead(entry_or_node, absl::OkStatus(), std::move(lock));
}
template <typename EntryOrNode>
void EntryOrNodeReadError(EntryOrNode& entry_or_node, absl::Status error) {
static_assert(std::is_same_v<EntryOrNode, Entry> ||
std::is_same_v<EntryOrNode, TransactionNode>);
assert(!error.ok());
ResolveIssuedRead(entry_or_node, std::move(error),
UniqueWriterLock{GetOwningEntry(entry_or_node)});
}
void RemoveTransactionFromMap(TransactionNode& node) {
if (TransactionTree::IsDisconnected(node)) {
return;
}
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "RemoveTransactionFromMap";
GetOwningEntry(node).transactions_.Remove(node);
}
void ResolveIssuedWriteback(AsyncCache::TransactionNode& node,
UniqueWriterLock<Entry> lock) {
auto& entry = GetOwningEntry(node);
assert(node.prepare_for_commit_state_.load(std::memory_order_relaxed) ==
PrepareForCommitState::kReadyForCommitCalled);
assert(entry.committing_transaction_node_ &&
entry.committing_transaction_node_->transaction() ==
node.transaction());
assert(entry.read_request_state_.issued.null());
if (entry.committing_transaction_node_ != &node) {
intrusive_linked_list::Remove(PendingWritebackQueueAccessor{}, &node);
} else {
auto* next_node = PendingWritebackQueueAccessor::GetNext(&node);
if (next_node != &node) {
intrusive_linked_list::Remove(PendingWritebackQueueAccessor{}, &node);
entry.committing_transaction_node_ = next_node;
} else {
entry.committing_transaction_node_ = nullptr;
}
}
RemoveTransactionFromMap(node);
MaybeStartReadOrWriteback(entry, std::move(lock), {});
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << node << "CommitDone";
node.CommitDone();
}
}
const ReadState& AsyncCache::ReadState::Unknown() {
static const absl::NoDestructor<ReadState> read_state;
return *read_state;
}
size_t AsyncCache::Entry::ComputeReadDataSizeInBytes(const void* data) {
return 0;
}
size_t AsyncCache::DoGetFixedSizeInBytes(Cache::Entry* entry) {
return this->Cache::DoGetSizeInBytes(entry);
}
size_t AsyncCache::DoGetSizeInBytes(Cache::Entry* base_entry) {
auto* entry = static_cast<Entry*>(base_entry);
return this->DoGetFixedSizeInBytes(entry) +
entry->read_request_state_.read_state_size;
}
Future<const void> AsyncCache::Entry::Read(AsyncCacheReadRequest request,
bool must_not_be_known_to_be_stale) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Read: staleness_bound=" << request.staleness_bound
<< ", must_not_be_known_to_be_stale=" << must_not_be_known_to_be_stale;
return RequestRead(*this, request, must_not_be_known_to_be_stale);
}
void AsyncCache::Entry::ReadSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
internal::EntryOrNodeReadSuccess(*this, std::move(read_state));
}
void AsyncCache::Entry::ReadError(absl::Status error) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadError: error=" << error;
internal::EntryOrNodeReadError(*this, std::move(error));
}
AsyncCache::TransactionNode::TransactionNode(Entry& entry)
: internal::TransactionState::Node(Cache::PinnedEntry(&entry).release()),
reads_committed_(false),
size_updated_(false) {}
Future<const void> AsyncCache::TransactionNode::Read(
AsyncCacheReadRequest request, bool must_not_be_known_to_be_stale) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Read: staleness_bound=" << request.staleness_bound
<< ", must_not_be_known_to_be_stale=" << must_not_be_known_to_be_stale;
if (reads_committed_ &&
(prepare_for_commit_state_.load(std::memory_order_acquire) !=
PrepareForCommitState::kReadyForCommitCalled)) {
return RequestRead(GetOwningEntry(*this), request,
must_not_be_known_to_be_stale);
}
return RequestRead(*this, request, must_not_be_known_to_be_stale);
}
void AsyncCache::TransactionNode::ReadSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
internal::EntryOrNodeReadSuccess(*this, std::move(read_state));
}
void AsyncCache::TransactionNode::ReadError(absl::Status error) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "ReadError: error=" << error;
internal::EntryOrNodeReadError(*this, std::move(error));
}
void AsyncCache::TransactionNode::PrepareForCommit() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "PrepareForCommit";
intrusive_ptr_increment(this);
auto& entry = GetOwningEntry(*this);
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(*this);
if (entry.committing_transaction_node_) {
intrusive_linked_list::InsertBefore(PendingWritebackQueueAccessor{},
entry.committing_transaction_node_,
this);
if (entry.committing_transaction_node_->transaction() != transaction()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "Commit: enqueuing for writeback";
return;
}
assert(entry.committing_transaction_node_->prepare_for_commit_state_.load(
std::memory_order_relaxed) >=
PrepareForCommitState::kPrepareDoneCalled);
} else {
intrusive_linked_list::Initialize(PendingWritebackQueueAccessor{}, this);
}
entry.committing_transaction_node_ = this;
MaybeStartReadOrWriteback(entry, std::move(lock), {});
}
void AsyncCache::TransactionNode::Abort() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "Abort";
auto& entry = GetOwningEntry(*this);
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(*this);
lock.unlock();
AbortDone();
}
void AsyncCache::TransactionNode::WritebackSuccess(ReadState&& read_state) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "WritebackSuccess: " << read_state.stamp
<< ", data=" << read_state.data.get();
auto& entry = GetOwningEntry(*this);
const size_t read_state_size = GetReadStateSize(entry, read_state.data.get());
UniqueWriterLock lock{entry};
auto& request_state = entry.read_request_state_;
absl::Time read_state_time = read_state.stamp.time;
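// An unknown generation means writeback did not produce a new read state;
// if its timestamp is newer than the cached one, the cached state is only
// marked known_to_be_stale rather than replaced.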
if (!StorageGeneration::IsUnknown(read_state.stamp.generation)) {
assert(read_state.stamp.generation != StorageGeneration::Invalid());
assert(read_state_time != absl::InfinitePast());
assert(read_state_time >= request_state.read_state.stamp.time);
SetReadState(entry, std::move(read_state), read_state_size);
} else if (read_state_time > request_state.read_state.stamp.time) {
request_state.known_to_be_stale = true;
}
QueuedReadHandler queued_read_handler(request_state, read_state_time);
ResolveIssuedWriteback(*this, std::move(lock));
}
void AsyncCache::TransactionNode::WritebackError() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "WritebackError";
ResolveIssuedWriteback(*this, UniqueWriterLock{GetOwningEntry(*this)});
}
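// Returns an open transaction node for this entry, creating and
// initializing one if needed. For an implicit transaction (null pointer),
// revoked nodes are replaced until a usable node is obtained; for explicit
// transactions the node is found in, or inserted into, the entry's
// transaction map.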
Result<OpenTransactionNodePtr<AsyncCache::TransactionNode>>
AsyncCache::Entry::GetTransactionNodeImpl(OpenTransactionPtr& transaction) {
constexpr auto EnsureTransactionNodeInitialized =
[](AsyncCache::TransactionNode& node,
OpenTransactionPtr& transaction) -> bool {
auto& entry = GetOwningEntry(node);
bool initialized = false;
absl::call_once(node.initialized_, [&] {
const bool new_implicit_transaction = !transaction;
node.initialized_status_ = node.DoInitialize(transaction);
if (node.initialized_status_.ok()) {
if (new_implicit_transaction) {
node.SetTransaction(GetOrCreateOpenTransaction(transaction));
UniqueWriterLock lock(entry);
entry.transactions_.FindOrInsert(
[&](TransactionNode& existing_node) {
return internal::DoThreeWayComparison(
std::less<>{}, transaction.get(),
existing_node.transaction());
},
[&] { return &node; });
}
assert(node.transaction() == transaction.get());
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< node << "New node, new implicit=" << new_implicit_transaction
<< ", transaction=" << transaction.get();
node.initialized_status_ = node.Register();
} else if (!new_implicit_transaction) {
UniqueWriterLock lock(entry);
RemoveTransactionFromMap(node);
}
initialized = true;
});
return initialized;
};
WeakTransactionNodePtr<TransactionNode> node;
if (!transaction) {
WeakTransactionNodePtr<TransactionNode> stale_node;
while (true) {
node.reset(GetOwningCache(*this).DoAllocateTransactionNode(*this));
[[maybe_unused]] bool initialized =
EnsureTransactionNodeInitialized(*node, transaction);
TENSORSTORE_RETURN_IF_ERROR(node->initialized_status_);
assert(initialized);
if (node->IsRevoked()) {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *node << "Node is revoked";
std::swap(stale_node, node);
continue;
}
node->transaction()->RequestCommit();
break;
}
} else {
size_t min_phase = transaction->phase();
WeakTransactionNodePtr<TransactionNode> stale_node;
while (true) {
UniqueWriterLock lock(*this);
const auto MakeNode = [&] {
auto* node = GetOwningCache(*this).DoAllocateTransactionNode(*this);
node->SetTransaction(*transaction);
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *node << "Adding transaction to map";
return node;
};
auto* candidate_node =
transactions_
.FindOrInsert(
[transaction = transaction.get()](TransactionNode& node) {
return internal::DoThreeWayComparison(
std::less<>{}, transaction, node.transaction());
},
MakeNode)
.first;
if (candidate_node == stale_node.get()) {
auto* new_node = MakeNode();
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *candidate_node << "Replacing in map";
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *new_node << "Adding to map";
transactions_.Replace(*candidate_node, *new_node);
candidate_node = new_node;
}
node.reset(candidate_node);
lock.unlock();
stale_node.reset();
EnsureTransactionNodeInitialized(*node, transaction);
TENSORSTORE_RETURN_IF_ERROR(node->initialized_status_);
if (node->phase() >= min_phase && !node->IsRevoked()) {
break;
}
stale_node = std::move(node);
}
}
OpenTransactionPtr(node->transaction()).release();
return OpenTransactionNodePtr<TransactionNode>(node.release(),
internal::adopt_object_ref);
}
void AsyncCache::TransactionNode::Commit() { intrusive_ptr_decrement(this); }
void AsyncCache::TransactionNode::WriterLock() { mutex_.WriterLock(); }
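// Any deferred size change is applied on unlock, after recomputing the
// write state size.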
void AsyncCache::TransactionNode::WriterUnlock() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "unlock";
UniqueWriterLock lock(mutex_, std::adopt_lock);
if (!size_updated_) return;
size_updated_ = false;
const size_t new_size = this->ComputeWriteStateSizeInBytes();
const size_t change = new_size - std::exchange(write_state_size_, new_size);
if (change == 0) return;
this->UpdateSizeInBytes(change);
}
bool AsyncCache::TransactionNode::try_lock() {
mutex_.WriterLock();
if (!IsRevoked()) return true;
mutex_.WriterUnlock();
return false;
}
size_t AsyncCache::TransactionNode::ComputeWriteStateSizeInBytes() { return 0; }
absl::Status AsyncCache::TransactionNode::DoInitialize(
internal::OpenTransactionPtr& transaction) {
return absl::OkStatus();
}
void AsyncCache::TransactionNode::DoApply(ApplyOptions options,
ApplyReceiver receiver) {
ABSL_UNREACHABLE();
}
void AsyncCache::TransactionNode::Revoke() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "Revoke";
revoked_.store(true, std::memory_order_release);
}
void AsyncCache::TransactionNode::InvalidateReadState() {
assert(this->transaction()->commit_started());
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "InvalidateReadState";
this->read_request_state_.read_state = ReadState{};
}
AsyncCache::TransactionNode::~TransactionNode() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
<< *this << "~TransactionNode";
Cache::PinnedEntry(static_cast<Cache::Entry*>(associated_data()),
adopt_object_ref);
}
#ifdef TENSORSTORE_ASYNC_CACHE_DEBUG
AsyncCache::Entry::~Entry() {
ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG) << *this << "~Entry";
}
#endif
}
} | #include "tensorstore/internal/cache/async_cache.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::no_transaction;
using ::tensorstore::Transaction;
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::AsyncCache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::TransactionState;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal::WeakTransactionNodePtr;
using ::tensorstore::internal_testing::TestConcurrent;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
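// Captures the read and writeback requests issued by TestCache so tests can
// complete them explicitly.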
struct RequestLog {
struct ReadRequest {
AsyncCache::Entry* entry;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
entry->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { entry->ReadError(std::move(error)); }
};
struct TransactionReadRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->ReadSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) { node->ReadError(std::move(error)); }
};
struct WritebackRequest {
AsyncCache::TransactionNode* node;
void Success(absl::Time time = absl::Now(),
std::shared_ptr<const size_t> value = {}) {
node->WritebackSuccess(
{std::move(value),
{tensorstore::StorageGeneration::FromString("g"), time}});
}
void Error(absl::Status error) {
node->SetError(error);
node->WritebackError();
}
};
tensorstore::internal::ConcurrentQueue<ReadRequest> reads;
tensorstore::internal::ConcurrentQueue<TransactionReadRequest>
transaction_reads;
tensorstore::internal::ConcurrentQueue<WritebackRequest> writebacks;
void HandleWritebacks() {
while (auto req = writebacks.pop_nonblock()) {
req->Success();
}
}
};
class TestCache : public tensorstore::internal::AsyncCache {
using Base = tensorstore::internal::AsyncCache;
public:
using ReadData = size_t;
class Entry : public AsyncCache::Entry {
public:
using OwningCache = TestCache;
auto CreateWriteTransaction(OpenTransactionPtr transaction = {}) {
return GetTransactionNode(*this, transaction).value();
}
Future<const void> CreateWriteTransactionFuture(
OpenTransactionPtr transaction = {}) {
return CreateWriteTransaction(std::move(transaction))
->transaction()
->future();
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->reads.push(RequestLog::ReadRequest{this});
}
size_t ComputeReadDataSizeInBytes(const void* data) override {
return *static_cast<const size_t*>(data);
}
absl::Status do_initialize_transaction_error;
bool share_implicit_transaction_nodes = true;
};
class TransactionNode : public Base::TransactionNode {
public:
using OwningCache = TestCache;
using Base::TransactionNode::TransactionNode;
absl::Status DoInitialize(OpenTransactionPtr& transaction) override {
TENSORSTORE_RETURN_IF_ERROR(
this->Base::TransactionNode::DoInitialize(transaction));
auto& entry = GetOwningEntry(*this);
++value;
SetReadsCommitted();
return entry.do_initialize_transaction_error;
}
void DoRead(AsyncCacheReadRequest request) override {
GetOwningCache(*this).log_->transaction_reads.push(
RequestLog::TransactionReadRequest{this});
}
void Commit() override {
GetOwningCache(*this).log_->writebacks.push(
RequestLog::WritebackRequest{this});
Base::TransactionNode::Commit();
}
size_t ComputeWriteStateSizeInBytes() override { return size; }
int value = 0;
size_t size = 0;
};
TestCache(RequestLog* log) : log_(log) {}
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
private:
RequestLog* log_;
};
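// Verifies basic read behavior: concurrent reads with satisfiable staleness
// bounds share a single future and a single DoRead request, reads already
// satisfied by the cached generation complete immediately, and a newer
// staleness bound triggers a fresh read.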
TEST(AsyncCacheTest, ReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
absl::Time read_time1, read_time2;
{
auto init_time = absl::Now();
auto read_future = entry->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = entry->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time1 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = entry->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = entry->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.entry).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = entry->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
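// A read error is reported to the pending future but does not poison the
// entry; the next read may still succeed.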
TEST(AsyncCacheTest, ReadFailed) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
TEST(AsyncCacheTest, ReadFailedAfterSuccessfulRead) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
const auto read_status = absl::UnknownError("read failed");
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Error(read_status);
}
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
EXPECT_EQ(read_status, read_future.status());
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
}
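// A write without an explicit transaction commits through an implicit
// transaction whose future resolves once writeback succeeds.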
TEST(AsyncCacheTest, NonTransactionalWrite) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
Future<const void> write_future;
{
auto node = entry->CreateWriteTransaction();
weak_node.reset(node.get());
write_future = node->transaction()->future();
}
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, NonTransactionalWriteback) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
auto write_time = absl::Now();
{
auto write_req = log.writebacks.pop();
write_req.Success(write_time);
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_future = entry->Read({write_time});
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
EXPECT_FALSE(read_future.ready());
auto read_req = log.reads.pop();
read_req.Success();
EXPECT_TRUE(read_future.ready());
}
}
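// Forcing a writeback future while a read is in flight defers the writeback
// request until the outstanding read completes.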
TEST(AsyncCacheTest, WritebackRequestedWithReadIssued) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto write_future = entry->CreateWriteTransactionFuture();
write_future.Force();
ASSERT_FALSE(write_future.ready());
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.reads.pop();
read_req.Success();
}
ASSERT_FALSE(write_future.ready());
ASSERT_TRUE(read_future.ready());
TENSORSTORE_ASSERT_OK(read_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
TEST(AsyncCacheTest, WritebackRequestedByCache) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto write_future = entry->CreateWriteTransactionFuture();
ASSERT_FALSE(write_future.ready());
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(1, log.writebacks.size());
{
auto write_req = log.writebacks.pop();
write_req.Success();
}
ASSERT_TRUE(write_future.ready());
TENSORSTORE_ASSERT_OK(write_future);
ASSERT_EQ(0, log.reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
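// Re-runs the ReadBasic scenarios through a transaction node whose commit
// (and therefore its writeback request) is held pending until the end of
// the test.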
TEST(AsyncCacheTest, TransactionalReadBasic) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
absl::Time read_time1, read_time2;
auto commit_future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
{
auto init_time = absl::Now();
auto read_future = weak_node->Read({init_time});
ASSERT_FALSE(read_future.ready());
{
auto read_future2 = weak_node->Read({init_time});
EXPECT_TRUE(HaveSameSharedState(read_future, read_future2));
}
ASSERT_EQ(1u, log.transaction_reads.size());
read_time1 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(absl::InfinitePast(),
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time1);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
{
auto read_future3 = weak_node->Read({read_time1});
ASSERT_TRUE(read_future3.ready());
TENSORSTORE_EXPECT_OK(read_future3);
ASSERT_TRUE(log.transaction_reads.empty());
ASSERT_TRUE(log.writebacks.empty());
}
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
ASSERT_EQ(1u, log.transaction_reads.size());
ASSERT_TRUE(log.writebacks.empty());
read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time1,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = UniqueNow();
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
EXPECT_FALSE(HaveSameSharedState(read_future, read_future1));
{
auto read_future2 = weak_node->Read({absl::InfiniteFuture()});
EXPECT_TRUE(HaveSameSharedState(read_future1, read_future2));
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time2,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
auto read_time2 = absl::Now();
{
auto read_req = log.transaction_reads.pop();
EXPECT_EQ(read_time,
AsyncCache::ReadLock<void>(*read_req.node).stamp().time);
read_req.Success(read_time2);
}
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future.ready());
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
auto read_time = absl::Now();
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
{
auto read_future = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future.ready());
{
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
ASSERT_FALSE(read_future1.ready());
}
auto read_future1 = weak_node->Read({absl::InfiniteFuture()});
auto read_time = absl::Now();
ASSERT_FALSE(read_future1.ready());
ASSERT_EQ(1, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
{
auto read_req = log.transaction_reads.pop();
read_req.Success(read_time);
}
ASSERT_TRUE(read_future.ready());
TENSORSTORE_EXPECT_OK(read_future);
ASSERT_TRUE(read_future1.ready());
TENSORSTORE_EXPECT_OK(read_future1);
ASSERT_EQ(0, log.transaction_reads.size());
ASSERT_EQ(0, log.writebacks.size());
}
write_req.Success();
ASSERT_TRUE(commit_future.ready());
TENSORSTORE_EXPECT_OK(commit_future);
}
TEST(AsyncCacheTest, TransactionalWritebackSuccess) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node.reset(node.get());
}
auto future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Success();
}
ASSERT_TRUE(future.ready());
TENSORSTORE_EXPECT_OK(future);
}
TEST(AsyncCacheTest, TransactionalWritebackError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
weak_node.reset(entry->CreateWriteTransaction(open_transaction).get());
}
auto future = transaction.CommitAsync();
auto error = absl::UnknownError("write error");
{
auto write_req = log.writebacks.pop();
EXPECT_EQ(weak_node.get(), write_req.node);
write_req.Error(error);
}
ASSERT_TRUE(future.ready());
EXPECT_EQ(error, future.status());
}
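// Commits several multi-entry atomic transactions concurrently and verifies
// that each transaction's writebacks are issued as a contiguous group that
// covers every entry exactly once.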
TEST(AsyncCacheTest, ConcurrentTransactionCommit) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
static constexpr size_t kNumEntries = 2;
tensorstore::internal::PinnedCacheEntry<TestCache> entries[kNumEntries];
for (size_t i = 0; i < kNumEntries; ++i) {
entries[i] = GetCacheEntry(cache, tensorstore::StrCat(i));
}
static constexpr size_t kNumTransactions = 3;
std::vector<Transaction> transactions(kNumTransactions, no_transaction);
TestConcurrent<kNumTransactions>(
100,
[&] {
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
transaction = Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
transaction));
for (size_t j = 0; j < kNumEntries; ++j) {
entries[(i + j) % kNumEntries]->CreateWriteTransaction(
open_transaction);
}
ASSERT_FALSE(transaction.future().ready());
}
},
[&] {
TransactionState* expected_transactions[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
auto& transaction = transactions[i];
ASSERT_TRUE(transaction.commit_started());
ASSERT_FALSE(transaction.future().ready());
expected_transactions[i] = TransactionState::get(transaction);
}
TransactionState* transaction_order[kNumTransactions];
for (size_t i = 0; i < kNumTransactions; ++i) {
PinnedCacheEntry<TestCache> entry_order[kNumEntries];
ASSERT_EQ(kNumEntries, log.writebacks.size());
for (size_t j = 0; j < kNumEntries; ++j) {
auto write_req = log.writebacks.pop();
entry_order[j].reset(static_cast<TestCache::Entry*>(
&GetOwningEntry(*write_req.node)));
if (j == 0) {
transaction_order[i] = write_req.node->transaction();
} else {
ASSERT_EQ(transaction_order[i], write_req.node->transaction());
}
write_req.Success();
}
EXPECT_THAT(entry_order,
::testing::UnorderedElementsAreArray(entries));
}
EXPECT_THAT(transaction_order, ::testing::UnorderedElementsAreArray(
expected_transactions));
for (auto& transaction : transactions) {
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_ASSERT_OK(transaction.future());
transaction = no_transaction;
}
},
[&](size_t i) { transactions[i].CommitAsync().IgnoreFuture(); });
}
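// An error returned from DoInitialize propagates out of GetTransactionNode
// for both implicit and explicit transactions.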
TEST(AsyncCacheTest, DoInitializeTransactionError) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
entry->do_initialize_transaction_error = absl::UnknownError("initialize");
{
OpenTransactionPtr transaction;
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
EXPECT_THAT(
GetTransactionNode(*entry, transaction).status(),
tensorstore::MatchesStatus(absl::StatusCode::kUnknown, "initialize.*"));
}
}
TEST(AsyncCacheTest, ConcurrentInitializeExplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
OpenTransactionPtr open_transaction;
TestConcurrent<2>(
100,
[&] {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(
Transaction(tensorstore::isolated)));
},
[] {},
[&](size_t i) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, open_transaction));
EXPECT_EQ(1, node->value);
});
}
TEST(AsyncCacheTest, ConcurrentInitializeImplicitTransaction) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
TestConcurrent<2>(
100,
[] {},
[&] { log.HandleWritebacks(); },
[&](size_t i) {
OpenTransactionPtr transaction;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto node, GetTransactionNode(*entry, transaction));
EXPECT_EQ(1, node->value);
});
}
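// Each write issued without an explicit transaction receives its own
// implicit transaction node, so the two nodes are distinct.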
TEST(AsyncCacheTest, ShareImplicitTransactionNodesFalse) {
auto pool = CachePool::Make(kSmallCacheLimits);
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto node = entry->CreateWriteTransaction();
auto node2 = entry->CreateWriteTransaction();
EXPECT_NE(node, node2);
node = {};
node2 = {};
log.HandleWritebacks();
}
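// With a 20000-byte pool limit, read data reported as larger than the limit
// is evicted once the entry is unpinned, while smaller read data survives
// re-acquiring the entry.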
TEST(AsyncCacheTest, ReadSizeInBytes) {
auto pool = CachePool::Make(CachePool::Limits{20000});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
{
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::Now()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(19000));
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::Pointee(19000));
auto read_future = entry->Read({absl::InfiniteFuture()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(21000));
ASSERT_TRUE(read_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::IsNull());
auto read_future = entry->Read({absl::InfiniteFuture()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(1000));
ASSERT_TRUE(read_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::Pointee(1000));
auto write_future = entry->CreateWriteTransactionFuture();
write_future.Force();
log.writebacks.pop().Success(absl::Now(), std::make_shared<size_t>(21000));
ASSERT_TRUE(write_future.ready());
}
{
auto entry = GetCacheEntry(cache, "a");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry).data(),
::testing::IsNull());
}
}
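// Sizes reported by transaction nodes via MarkSizeUpdated() are accumulated
// into Transaction::total_bytes(), with an updated node replacing its
// previous contribution (100000 + 500, then 110000 + 500).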
TEST(AsyncCacheTest, ExplicitTransactionSize) {
auto pool = CachePool::Make(CachePool::Limits{20000});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
{
auto entry_b = GetCacheEntry(cache, "b");
auto read_future = entry_b->Read({absl::Now()});
log.reads.pop().Success(absl::Now(), std::make_shared<size_t>(1000));
}
auto transaction = Transaction(tensorstore::isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
{
auto entry_a = GetCacheEntry(cache, "a");
{
auto node = entry_a->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 100000;
node->MarkSizeUpdated();
}
EXPECT_EQ(100000, transaction.total_bytes());
auto entry_c = GetCacheEntry(cache, "c");
{
auto node = entry_c->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 500;
node->MarkSizeUpdated();
}
EXPECT_EQ(100500, transaction.total_bytes());
{
auto node = entry_a->CreateWriteTransaction(open_transaction);
UniqueWriterLock lock(*node);
node->size = 110000;
node->MarkSizeUpdated();
}
EXPECT_EQ(110500, transaction.total_bytes());
}
{
auto entry_b = GetCacheEntry(cache, "b");
EXPECT_THAT(AsyncCache::ReadLock<size_t>(*entry_b).data(),
::testing::Pointee(1000));
}
}
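// A revoked transaction node still participates in commit: both the revoked
// node and its replacement are written back, regardless of completion
// order.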
void TestRevokedTransactionNode(bool reverse_order) {
auto pool = CachePool::Make(CachePool::Limits{});
RequestLog log;
auto cache = GetCache<TestCache>(
pool.get(), "", [&] { return std::make_unique<TestCache>(&log); });
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node1;
WeakTransactionNodePtr<TestCache::TransactionNode> weak_node2;
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
{
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node1.reset(node.get());
node->Revoke();
}
{
auto node = entry->CreateWriteTransaction(open_transaction);
EXPECT_EQ(node, GetTransactionNode(*entry, open_transaction));
weak_node2.reset(node.get());
}
}
auto future = transaction.CommitAsync();
EXPECT_TRUE(transaction.commit_started());
{
auto write_req1 = log.writebacks.pop();
EXPECT_EQ(weak_node1.get(), write_req1.node);
auto write_req2 = log.writebacks.pop();
EXPECT_EQ(weak_node2.get(), write_req2.node);
if (reverse_order) {
write_req2.Success();
write_req1.Success();
} else {
write_req1.Success();
write_req2.Success();
}
}
ASSERT_TRUE(future.ready());
TENSORSTORE_EXPECT_OK(future);
}
TEST(AsyncCacheTest, RevokedTransactionNodeFifo) {
TestRevokedTransactionNode(false);
}
TEST(AsyncCacheTest, RevokedTransactionNodeLifo) {
TestRevokedTransactionNode(true);
}
}  // namespace

Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/async_cache.cc
Test Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/async_cache_test.cc
Commit Hash: 4f887a6430414cd6088e1743555015b10f116d50
ID: 887f9b4a-0eba-4a16-8403-657c911b9331
Language: cpp
Repository Name: google/tensorstore
File Name: cache_pool_resource
File Path in Repository: tensorstore/internal/cache/cache_pool_resource.cc
File Path for Unit Test: tensorstore/internal/cache/cache_pool_resource_test.cc

Code:

#include "tensorstore/internal/cache/cache_pool_resource.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
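// Implements the context resource traits for CachePoolResource. The JSON
// spec exposes a single member, e.g. {"total_bytes_limit": 10000000}
// (defaulting to 0); Create() builds a pool with those limits and hands out
// a weak pointer, while Acquire/ReleaseContextReference manage the strong
// reference count.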
struct CachePoolResourceTraits
: public ContextResourceTraits<CachePoolResource> {
using Spec = CachePool::Limits;
using Resource = typename CachePoolResource::Resource;
static constexpr Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(
jb::Member("total_bytes_limit",
jb::Projection(&Spec::total_bytes_limit,
jb::DefaultValue([](auto* v) { *v = 0; }))));
}
static Result<Resource> Create(const Spec& limits,
ContextResourceCreationContext context) {
return CachePool::WeakPtr(CachePool::Make(limits));
}
static Spec GetSpec(const Resource& pool, const ContextSpecBuilder& builder) {
return pool->limits();
}
static void AcquireContextReference(const Resource& p) {
internal_cache::StrongPtrTraitsCachePool::increment(p.get());
}
static void ReleaseContextReference(const Resource& p) {
internal_cache::StrongPtrTraitsCachePool::decrement(p.get());
}
};
const ContextResourceRegistration<CachePoolResourceTraits> registration;
}  // namespace
}  // namespace internal
}  // namespace tensorstore

Unit Test - (Ground Truth):

#include "tensorstore/internal/cache/cache_pool_resource.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::CachePoolResource;
TEST(CachePoolResourceTest, Default) {
auto resource_spec = Context::Resource<CachePoolResource>::DefaultSpec();
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
TEST(CachePoolResourceTest, EmptyObject) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
::nlohmann::json::object_t{}));
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
TEST(CachePoolResourceTest, TotalBytesLimitOnly) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
{{"total_bytes_limit", 100}}));
auto cache = Context::Default().GetResource(resource_spec).value();
EXPECT_EQ(100u, (*cache)->limits().total_bytes_limit);
}
}  // namespace

Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_pool_resource.cc
Test Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_pool_resource_test.cc
Commit Hash: 4f887a6430414cd6088e1743555015b10f116d50

ID: 06bf8626-83aa-466b-9eec-d04667fc399d
Language: cpp
Repository Name: google/tensorstore
File Name: kvs_backed_cache
File Path in Repository: tensorstore/internal/cache/kvs_backed_cache.cc
File Path for Unit Test: tensorstore/internal/cache/kvs_backed_cache_test.cc

Code:

#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include <cstdint>
#include <string>
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
namespace tensorstore {
namespace internal {
namespace {
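// Counts KvsBackedCache reads by outcome ("unchanged", "changed", "error"),
// as recorded by the helper functions below.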
auto& kvs_cache_read = internal_metrics::Counter<int64_t, std::string>::New(
"/tensorstore/cache/kvs_cache_read", "category",
internal_metrics::MetricMetadata(
"Count of kvs_backed_cache reads by category. A large number of "
"'unchanged' reads indicates that the dataset is relatively "
"quiescent."));
}
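// Out-of-line helpers, presumably so the templated KvsBackedCache can bump
// the shared metric cells without instantiating them per template argument.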
void KvsBackedCache_IncrementReadUnchangedMetric() {
static auto& cell = kvs_cache_read.GetCell("unchanged");
cell.Increment();
}
void KvsBackedCache_IncrementReadChangedMetric() {
static auto& cell = kvs_cache_read.GetCell("changed");
cell.Increment();
}
void KvsBackedCache_IncrementReadErrorMetric() {
static auto& cell = kvs_cache_read.GetCell("error");
cell.Increment();
}
}  // namespace internal
}  // namespace tensorstore

Unit Test - (Ground Truth):

#include <stddef.h>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::KeyRange;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::OpenTransactionPtr;
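// Registers the reusable KvsBackedCache transactional conformance tests
// against both a non-atomic and an atomic in-memory kvstore.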
TENSORSTORE_GLOBAL_INITIALIZER {
using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
{
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = "MemoryNonAtomic";
options.get_store = [] {
return tensorstore::GetMemoryKeyValueStore(false);
};
options.multi_key_atomic_supported = false;
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
{
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = "MemoryAtomic";
options.get_store = [] {
return tensorstore::GetMemoryKeyValueStore(true);
};
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
}
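// Fixture wiring a KvsBackedTestCache to a MockKeyValueStore so each test
// can intercept read/write/delete-range requests and forward them to a real
// in-memory store at the moment of its choosing.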
class MockStoreTest : public ::testing::Test {
protected:
CachePool::StrongPtr pool = CachePool::Make(CachePool::Limits{});
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr memory_store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::CachePtr<KvsBackedTestCache> GetCache(
std::string cache_identifier = {},
kvstore::DriverPtr kvstore_driver = {}) {
if (!kvstore_driver) kvstore_driver = mock_store;
return tensorstore::internal::GetCache<KvsBackedTestCache>(
pool.get(), cache_identifier,
[&] { return std::make_unique<KvsBackedTestCache>(kvstore_driver); });
}
tensorstore::internal::CachePtr<KvsBackedTestCache> cache = GetCache();
};
TEST_F(MockStoreTest, ReadSuccess) {
auto entry = GetCacheEntry(cache, "a");
auto read_time = absl::Now();
auto read_future = entry->Read({read_time});
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
EXPECT_EQ(tensorstore::OptionalByteRangeRequest{},
read_req.options.byte_range);
EXPECT_EQ(read_time, read_req.options.staleness_bound);
read_req(memory_store);
}
TEST_F(MockStoreTest, ReadError) {
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::Now()});
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, WriteError) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto write_req = mock_store->write_requests.pop();
write_req.promise.SetResult(absl::FailedPreconditionError("write error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error writing \"a\": write error"));
}
TEST_F(MockStoreTest, ReadErrorDuringWriteback) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, ReadErrorDueToValidateDuringWriteback) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Validate(
open_transaction, [](absl::Cord data) { return absl::OkStatus(); }));
auto read_future = entry->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(), ::testing::Optional(absl::Cord()));
}
transaction.CommitAsync().IgnoreFuture();
auto read_req = mock_store->read_requests.pop();
read_req.promise.SetResult(absl::FailedPreconditionError("read error"));
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"a\": read error"));
}
TEST_F(MockStoreTest, WriteDuringRead) {
auto entry = GetCacheEntry(cache, "a");
auto read_future = entry->Read({absl::InfinitePast()});
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
auto read_future2 = entry->Read({absl::InfinitePast()});
{
auto read_req = mock_store->read_requests.pop();
read_req(memory_store);
TENSORSTORE_ASSERT_OK(read_future);
TENSORSTORE_ASSERT_OK(read_future2);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
TENSORSTORE_ASSERT_OK(transaction.future());
}
}
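// Barrier() splits a transaction into phases that commit sequentially; note
// that the writes to key "b" from two distinct caches within one phase are
// merged into a single kvstore write ("de" + "f" -> "def").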
TEST_F(MockStoreTest, MultiPhaseSeparateKeys) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "a")
->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "b")
->Modify(open_transaction, false, "de"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("y"), "b")
->Modify(open_transaction, false, "f"));
}
transaction.CommitAsync().IgnoreFuture();
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
EXPECT_THAT(memory_store->Read("a").result(),
MatchesKvsReadResult(absl::Cord("abc")));
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("def", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(memory_store->Read("b").result(),
MatchesKvsReadResult(absl::Cord("def")));
}
TEST_F(MockStoreTest, MultiPhaseSameKey) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def"));
}
transaction.CommitAsync().IgnoreFuture();
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto read_result,
memory_store->Read("a").result());
EXPECT_THAT(read_result, MatchesKvsReadResult(absl::Cord("abc")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto write_stamp, memory_store->Write("a", absl::Cord("xyz")).result());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(read_result.stamp.generation,
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abcdef", write_req.value);
write_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(read_result.stamp.generation,
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(write_stamp.generation,
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("xyzdef", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
EXPECT_THAT(memory_store->Read("a").result(),
MatchesKvsReadResult(absl::Cord("xyzdef")));
}
TEST_F(MockStoreTest, MultiPhaseSameKeyAbort) {
auto entry = GetCacheEntry(cache, "a");
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def"));
}
transaction.Abort();
}
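// The DeleteRange tests below cover how transactional range deletions are
// merged, split around single-key writes, and ordered across phases.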
TEST_F(MockStoreTest, DeleteRangeSingle) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeError) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req.promise.SetResult(absl::FailedPreconditionError("delete range error"));
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"delete range error"));
}
TEST_F(MockStoreTest, DeleteRangeAtomicError) {
auto transaction = Transaction(tensorstore::atomic_isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
EXPECT_THAT(mock_store->TransactionalDeleteRange(open_transaction,
KeyRange{"a", "c"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot delete range starting at \"a\" as single "
"atomic transaction"));
}
}
TEST_F(MockStoreTest, DeleteRangeMultipleDisjoint) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"d", "f"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("d", "f"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeMultipleOverlapping) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"b", "f"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "f"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeBeforeWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "b"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange(KeyRange::Successor("b"), "c"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_THAT(write_req.value, ::testing::Optional(std::string("abc")));
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeBeforeWriteJustBeforeExclusiveMax) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", KeyRange::Successor("b")}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "b"), req.range);
req(memory_store);
}
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_EQ("b", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
EXPECT_EQ("abc", write_req.value);
write_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeAfterWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeAfterValidateError) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")
->Validate(open_transaction, [](absl::Cord value) {
return absl::FailedPreconditionError("validate error");
}));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
mock_store->read_requests.pop()(memory_store);
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_TRUE(mock_store->delete_range_requests.empty());
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error writing \"b\": validate error"));
}
TEST_F(MockStoreTest, DeleteRangeAfterValidateAndModify) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")
->Validate(open_transaction, [](const absl::Cord& input) {
return absl::OkStatus();
}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto read_req = mock_store->read_requests.pop();
EXPECT_TRUE(mock_store->read_requests.empty());
EXPECT_TRUE(mock_store->write_requests.empty());
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ("b", read_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
read_req.options.generation_conditions.if_not_equal);
read_req(memory_store);
}
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, MultiPhaseValidateError) {
auto transaction = Transaction(tensorstore::isolated);
auto entry = GetCacheEntry(cache, "a");
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
open_transaction->Barrier();
auto validator = [](absl::Cord value) {
if (value != "abc") {
return absl::AbortedError("validation");
}
return absl::OkStatus();
};
TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def")));
ASSERT_FALSE(transaction.future().ready());
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
EXPECT_EQ(tensorstore::OptionalByteRangeRequest(0, 0),
read_req.options.byte_range);
read_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
read_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kAborted));
}
TEST_F(MockStoreTest, MultiPhaseValidateErrorAfterReadValue) {
auto transaction = Transaction(tensorstore::isolated);
auto entry = GetCacheEntry(cache, "a");
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc"));
open_transaction->Barrier();
auto validator = [](absl::Cord value) {
if (value != "abc") {
return absl::AbortedError("validation: " + std::string(value));
}
return absl::OkStatus();
};
TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator));
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "xyz"));
TENSORSTORE_ASSERT_OK(entry->Validate(
open_transaction, [](absl::Cord value) { return absl::OkStatus(); }));
EXPECT_THAT(entry->ReadValue(open_transaction).result(),
::testing::Optional(absl::Cord("xyz")));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
write_req.options.generation_conditions.if_equal);
write_req(memory_store);
}
TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def")));
ASSERT_FALSE(transaction.future().ready());
{
auto write_req = mock_store->write_requests.pop();
EXPECT_EQ("a", write_req.key);
write_req(memory_store);
}
{
auto read_req = mock_store->read_requests.pop();
EXPECT_EQ("a", read_req.key);
read_req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
EXPECT_THAT(transaction.future().result(),
MatchesStatus(absl::StatusCode::kAborted));
}
TEST_F(MockStoreTest, UnboundedDeleteRangeAfterWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", ""}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", ""), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, DeleteRangeThenWriteThenDeleteRange) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "d"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "d"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(MockStoreTest, MultiPhaseDeleteRangeOverlapEnd) {
const std::vector<std::vector<KeyRange>> test_cases = {
{
KeyRange{"a", "c"},
KeyRange{"a", "c"},
},
{
KeyRange{"a", "c"},
KeyRange{"a", "d"},
},
{
KeyRange{"b", "c"},
KeyRange{"a", "c"},
},
{
KeyRange{"b", "c"},
KeyRange{"a", "d"},
},
{
KeyRange{"a", "d"},
KeyRange{"b", "c"},
},
};
for (const auto& test_case : test_cases) {
SCOPED_TRACE("test_case=" + ::testing::PrintToString(test_case));
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
for (const auto& range : test_case) {
TENSORSTORE_ASSERT_OK(
mock_store->TransactionalDeleteRange(open_transaction, range));
open_transaction->Barrier();
}
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
for (const auto& range : test_case) {
auto req = mock_store->delete_range_requests.pop();
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ(range, req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
}
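// Tests a DeleteRange in one phase followed by a write and a larger
// DeleteRange in the next phase: each phase commits its own deletion, and the
// write is subsumed by the second range.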
TEST_F(MockStoreTest, MultiPhaseDeleteRangeAndWrite) {
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "c"}));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange(
open_transaction, KeyRange{"a", "d"}));
}
transaction.CommitAsync().IgnoreFuture();
ASSERT_FALSE(transaction.future().ready());
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_TRUE(mock_store->delete_range_requests.empty());
EXPECT_EQ(KeyRange("a", "c"), req.range);
req(memory_store);
}
{
auto req = mock_store->delete_range_requests.pop();
EXPECT_EQ(KeyRange("a", "d"), req.range);
req(memory_store);
}
ASSERT_TRUE(transaction.future().ready());
TENSORSTORE_EXPECT_OK(transaction.future());
}
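// Tests that writes to two distinct kvstores cannot be combined into a single
// atomic transaction.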
TEST_F(MockStoreTest, MultipleKeyValueStoreAtomicError) {
auto transaction = Transaction(tensorstore::atomic_isolated);
auto mock_store2 = MockKeyValueStore::Make();
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(
GetCacheEntry(cache, "x")->Modify(open_transaction, false, "abc"));
EXPECT_THAT(GetCacheEntry(GetCache("", mock_store2), "y")
->Modify(open_transaction, false, "abc"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot read/write \"x\" and read/write \"y\" as "
"single atomic transaction"));
}
}
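// MockKeyValueStore that invokes a caller-supplied callback at the start of
// each ReadModifyWrite call, e.g. to exercise races with entry
// initialization.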
class InitializationRaceTestingKvstore : public MockKeyValueStore {
public:
std::function<void()> on_read_modify_write;
absl::Status ReadModifyWrite(
tensorstore::internal::OpenTransactionPtr& transaction, size_t& phase,
Key key, ReadModifyWriteSource& source) override {
if (on_read_modify_write) on_read_modify_write();
return MockKeyValueStore::ReadModifyWrite(transaction, phase,
std::move(key), source);
}
};
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/kvs_backed_cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/kvs_backed_cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
60f7a046-9b9b-48c8-8cb5-20c774aecc0b | cpp | google/tensorstore | cache | tensorstore/internal/cache/cache.cc | tensorstore/internal/cache/cache_test.cc | #include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <atomic>
#include <bitset>
#include <cassert>
#include <memory>
#include <mutex>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/cache/cache_pool_limits.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/type_traits.h"
using ::tensorstore::internal_metrics::MetricMetadata;
namespace tensorstore {
namespace internal_cache {
auto& hit_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/hit_count", MetricMetadata("Number of cache hits."));
auto& miss_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/miss_count", MetricMetadata("Number of cache misses."));
auto& evict_count = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/evict_count",
MetricMetadata("Number of evictions from the cache."));
using ::tensorstore::internal::PinnedCacheEntry;
#if !defined(NDEBUG)
inline void DebugAssertMutexHeld(absl::Mutex* mutex) { mutex->AssertHeld(); }
#else
inline void DebugAssertMutexHeld(absl::Mutex* mutex) {}
#endif
using LruListAccessor =
internal::intrusive_linked_list::MemberAccessor<LruListNode>;
CachePoolImpl::CachePoolImpl(const CachePool::Limits& limits)
: limits_(limits),
total_bytes_(0),
strong_references_(1),
weak_references_(1) {
Initialize(LruListAccessor{}, &eviction_queue_);
}
namespace {
inline void AcquireWeakReference(CachePoolImpl* p) {
[[maybe_unused]] auto old_count =
p->weak_references_.fetch_add(1, std::memory_order_relaxed);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:increment", p,
old_count + 1);
}
void ReleaseWeakReference(CachePoolImpl* p) {
auto new_count = --p->weak_references_;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:weak:decrement", p,
new_count);
if (new_count == 0) {
delete Access::StaticCast<CachePool>(p);
}
}
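// Decrements a cache's reference count by `amount`, capturing the old and new
// counts so the caller can tell whether the cache should be destroyed or
// should release its weak reference to the pool.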
struct DecrementCacheReferenceCount {
explicit DecrementCacheReferenceCount(CacheImpl* cache_impl, size_t amount) {
old_count = cache_impl->reference_count_.fetch_sub(
amount, std::memory_order_acq_rel);
new_count = old_count - amount;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("Cache:decrement", cache_impl,
new_count);
}
bool should_delete() const {
return !CacheImpl::ShouldDelete(old_count) &&
CacheImpl::ShouldDelete(new_count);
}
bool should_release_cache_pool_weak_reference() const {
assert(old_count - new_count == CacheImpl::kStrongReferenceIncrement);
return !CacheImpl::ShouldHoldPoolWeakReference(new_count);
}
size_t old_count, new_count;
};
void UnlinkListNode(LruListNode* node) noexcept {
Remove(LruListAccessor{}, node);
Initialize(LruListAccessor{}, node);
}
void UnregisterEntryFromPool(CacheEntryImpl* entry,
CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
UnlinkListNode(entry);
pool->total_bytes_.fetch_sub(entry->num_bytes_, std::memory_order_relaxed);
}
void AddToEvictionQueue(CachePoolImpl* pool, CacheEntryImpl* entry) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
auto* eviction_queue = &pool->eviction_queue_;
if (!OnlyContainsNode(LruListAccessor{}, entry)) {
Remove(LruListAccessor{}, entry);
}
InsertBefore(LruListAccessor{}, eviction_queue, entry);
}
void DestroyCache(CachePoolImpl* pool, CacheImpl* cache);
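// Evicts least-recently-used entries until the pool is back within its
// `total_bytes_limit`.  Evicted entries are destroyed in batches with
// `lru_mutex_` temporarily released.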
void MaybeEvictEntries(CachePoolImpl* pool) noexcept {
DebugAssertMutexHeld(&pool->lru_mutex_);
constexpr size_t kBufferSize = 64;
std::array<CacheEntryImpl*, kBufferSize> entries_to_delete;
std::bitset<kBufferSize> should_delete_cache_for_entry;
size_t num_entries_to_delete = 0;
const auto destroy_entries = [&] {
internal::ScopedWriterUnlock unlock(pool->lru_mutex_);
for (size_t i = 0; i < num_entries_to_delete; ++i) {
auto* entry = entries_to_delete[i];
if (should_delete_cache_for_entry[i]) {
DestroyCache(entry->cache_->pool_, entry->cache_);
}
entry->cache_ = nullptr;
delete Access::StaticCast<CacheEntry>(entry);
}
};
while (pool->total_bytes_.load(std::memory_order_acquire) >
pool->limits_.total_bytes_limit) {
auto* queue = &pool->eviction_queue_;
if (queue->next == queue) {
break;
}
auto* entry = static_cast<CacheEntryImpl*>(queue->next);
auto* cache = entry->cache_;
bool evict = false;
bool should_delete_cache = false;
auto& shard = cache->ShardForKey(entry->key_);
if (absl::MutexLock lock(&shard.mutex);
entry->reference_count_.load(std::memory_order_acquire) == 0) {
[[maybe_unused]] size_t erase_count = shard.entries.erase(entry);
assert(erase_count == 1);
if (shard.entries.empty()) {
if (DecrementCacheReferenceCount(cache,
CacheImpl::kNonEmptyShardIncrement)
.should_delete()) {
should_delete_cache = true;
}
}
evict = true;
}
if (!evict) {
UnlinkListNode(entry);
continue;
}
UnregisterEntryFromPool(entry, pool);
evict_count.Increment();
should_delete_cache_for_entry[num_entries_to_delete] = should_delete_cache;
entries_to_delete[num_entries_to_delete++] = entry;
if (num_entries_to_delete == entries_to_delete.size()) {
destroy_entries();
num_entries_to_delete = 0;
}
}
destroy_entries();
}
void InitializeNewEntry(CacheEntryImpl* entry, CacheImpl* cache) noexcept {
entry->cache_ = cache;
entry->reference_count_.store(2, std::memory_order_relaxed);
entry->num_bytes_ = 0;
Initialize(LruListAccessor{}, entry);
}
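// Destroys `cache`, unregistering it from `pool` (if any) and deleting all of
// its remaining entries.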
void DestroyCache(CachePoolImpl* pool,
CacheImpl* cache) ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (pool) {
if (!cache->cache_identifier_.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(cache);
if (it != pool->caches_.end() && *it == cache) {
pool->caches_.erase(it);
}
}
if (HasLruCache(pool)) {
absl::MutexLock lru_lock(&pool->lru_mutex_);
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
UnregisterEntryFromPool(entry, pool);
}
}
} else {
for (auto& shard : cache->shards_) {
absl::MutexLock lock(&shard.mutex);
for (CacheEntryImpl* entry : shard.entries) {
entry->reference_count_.fetch_add(2, std::memory_order_acq_rel);
}
}
}
for (auto& shard : cache->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
assert(entry->reference_count_.load() >= 2 &&
entry->reference_count_.load() <= 3);
delete Access::StaticCast<Cache::Entry>(entry);
}
}
}
delete Access::StaticCast<Cache>(cache);
}
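// Decrements `reference_count` by `decrease_amount`.  If the count could drop
// to `lock_threshold` or below, the mutex returned by `mutex_fn` is acquired
// before decrementing and the still-held lock is returned; otherwise an empty
// lock is returned.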
template <typename T, typename LockFn>
inline UniqueWriterLock<absl::Mutex> DecrementReferenceCountWithLock(
std::atomic<T>& reference_count, LockFn mutex_fn, T& new_count,
internal::type_identity_t<T> decrease_amount,
internal::type_identity_t<T> lock_threshold) {
static_assert(std::is_invocable_v<LockFn>);
static_assert(std::is_same_v<absl::Mutex&, std::invoke_result_t<LockFn>>);
{
auto count = reference_count.load(std::memory_order_relaxed);
while (true) {
if (count <= lock_threshold + decrease_amount) break;
if (reference_count.compare_exchange_weak(count, count - decrease_amount,
std::memory_order_acq_rel)) {
new_count = count - decrease_amount;
return {};
}
}
}
UniqueWriterLock lock(mutex_fn());
auto count =
reference_count.fetch_sub(decrease_amount, std::memory_order_acq_rel) -
decrease_amount;
new_count = count;
if (count > lock_threshold) {
return {};
}
return lock;
}
}
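// Releases a strong (pinned) reference to a cache entry.  Pinned references
// are counted in units of 2 (an odd count indicates an outstanding weak
// reference).  When the entry becomes unpinned it is destroyed immediately if
// the pool has `total_bytes_limit == 0`, queued for LRU eviction otherwise,
// or deleted directly if the cache has no pool.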
void StrongPtrTraitsCacheEntry::decrement_impl(
CacheEntryImpl* entry_impl) noexcept ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* cache = entry_impl->cache_;
uint32_t new_count;
if (auto* pool_impl = cache->pool_) {
if (pool_impl->limits_.total_bytes_limit == 0) {
CacheImpl::Shard* shard = nullptr;
auto lock = DecrementReferenceCountWithLock(
entry_impl->reference_count_,
[&]() -> absl::Mutex& {
shard = &cache->ShardForKey(entry_impl->key_);
return shard->mutex;
},
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (!lock) return;
if (new_count == 0) {
shard->entries.erase(entry_impl);
if (shard->entries.empty()) {
cache->reference_count_.fetch_sub(CacheImpl::kNonEmptyShardIncrement,
std::memory_order_relaxed);
}
delete entry_impl;
}
} else {
auto lock = DecrementReferenceCountWithLock(
entry_impl->reference_count_,
[pool_impl]() -> absl::Mutex& { return pool_impl->lru_mutex_; },
new_count,
2, 1);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (!lock) return;
if (new_count == 0) {
AddToEvictionQueue(pool_impl, entry_impl);
MaybeEvictEntries(pool_impl);
}
}
assert(new_count <= 1);
} else {
new_count =
entry_impl->reference_count_.fetch_sub(2, std::memory_order_acq_rel) -
2;
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement",
entry_impl, new_count);
if (new_count > 1) return;
delete entry_impl;
}
StrongPtrTraitsCache::decrement(Access::StaticCast<Cache>(cache));
}
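// Attempts to acquire a strong reference to `cache_impl`, failing if the
// cache is already marked for deletion.  Acquires a weak reference to the
// pool if the cache previously held none.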
inline bool TryToAcquireCacheStrongReference(CachePoolImpl* pool,
CacheImpl* cache_impl) {
auto old_count = cache_impl->reference_count_.load(std::memory_order_relaxed);
while (true) {
if (CacheImpl::ShouldDelete(old_count)) {
return false;
}
if (cache_impl->reference_count_.compare_exchange_weak(
old_count, old_count + CacheImpl::kStrongReferenceIncrement,
std::memory_order_acq_rel)) {
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT(
"Cache:increment", cache_impl,
old_count + CacheImpl::kStrongReferenceIncrement);
if (!CacheImpl::ShouldHoldPoolWeakReference(old_count)) {
AcquireWeakReference(pool);
}
return true;
}
}
}
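// Returns the cache in `pool` identified by (cache_type, cache_key), creating
// it with `make_cache` if necessary.  A null pool or empty cache_key always
// produces a fresh, unregistered cache.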
CachePtr<Cache> GetCacheInternal(
CachePoolImpl* pool, const std::type_info& cache_type,
std::string_view cache_key,
absl::FunctionRef<std::unique_ptr<Cache>()> make_cache) {
CachePoolImpl::CacheKey key(cache_type, cache_key);
if (pool && !cache_key.empty()) {
absl::MutexLock lock(&pool->caches_mutex_);
auto it = pool->caches_.find(key);
if (it != pool->caches_.end()) {
auto* cache = *it;
if (!TryToAcquireCacheStrongReference(pool, cache)) {
pool->caches_.erase(it);
} else {
return CachePtr<Cache>(Access::StaticCast<Cache>(cache),
internal::adopt_object_ref);
}
}
}
std::unique_ptr<Cache> new_cache = make_cache();
if (!new_cache) return CachePtr<Cache>();
auto* cache_impl = Access::StaticCast<CacheImpl>(new_cache.get());
cache_impl->pool_ = pool;
if (!pool || cache_key.empty()) {
if (pool) {
AcquireWeakReference(pool);
}
new_cache.release();
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT(
"Cache:increment", cache_impl, CacheImpl::kStrongReferenceIncrement);
cache_impl->reference_count_.store(CacheImpl::kStrongReferenceIncrement,
std::memory_order_relaxed);
return CachePtr<Cache>(Access::StaticCast<Cache>(cache_impl),
internal::adopt_object_ref);
}
cache_impl->cache_type_ = &cache_type;
cache_impl->cache_identifier_ = std::string(cache_key);
absl::MutexLock lock(&pool->caches_mutex_);
auto insert_result = pool->caches_.insert(cache_impl);
if (insert_result.second ||
!TryToAcquireCacheStrongReference(pool, *insert_result.first)) {
if (!insert_result.second) {
const_cast<CacheImpl*&>(*insert_result.first) = cache_impl;
}
new_cache.release();
size_t initial_count = CacheImpl::kStrongReferenceIncrement;
if (pool->strong_references_.load(std::memory_order_relaxed) != 0) {
initial_count += CacheImpl::kCachePoolStrongReferenceIncrement;
}
cache_impl->reference_count_.store(initial_count,
std::memory_order_relaxed);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("Cache:increment", cache_impl,
initial_count);
AcquireWeakReference(pool);
}
return CachePtr<Cache>(Access::StaticCast<Cache>(*insert_result.first),
internal::adopt_object_ref);
}
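// Returns the entry for `key` in `cache`, allocating it on a miss.  The
// entry's DoInitialize hook runs exactly once (under absl::call_once), after
// which byte accounting is updated for pools with an LRU limit.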
PinnedCacheEntry<Cache> GetCacheEntryInternal(internal::Cache* cache,
std::string_view key) {
auto* cache_impl = Access::StaticCast<CacheImpl>(cache);
PinnedCacheEntry<Cache> returned_entry;
if (!cache_impl->pool_) {
std::string temp_key(key);
auto* entry_impl =
Access::StaticCast<CacheEntryImpl>(cache->DoAllocateEntry());
entry_impl->key_ = std::move(temp_key);
InitializeNewEntry(entry_impl, cache_impl);
StrongPtrTraitsCache::increment(cache);
returned_entry = PinnedCacheEntry<Cache>(
Access::StaticCast<CacheEntry>(entry_impl), internal::adopt_object_ref);
} else {
auto& shard = cache_impl->ShardForKey(key);
absl::MutexLock lock(&shard.mutex);
auto it = shard.entries.find(key);
if (it != shard.entries.end()) {
hit_count.Increment();
auto* entry_impl = *it;
auto old_count =
entry_impl->reference_count_.fetch_add(2, std::memory_order_acq_rel);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:increment",
entry_impl, old_count + 2);
if (old_count <= 1) {
StrongPtrTraitsCache::increment(cache);
}
returned_entry =
PinnedCacheEntry<Cache>(Access::StaticCast<Cache::Entry>(entry_impl),
internal::adopt_object_ref);
} else {
miss_count.Increment();
std::string temp_key(key);
auto* entry_impl =
Access::StaticCast<CacheEntryImpl>(cache->DoAllocateEntry());
entry_impl->key_ = std::move(temp_key);
InitializeNewEntry(entry_impl, cache_impl);
std::unique_ptr<CacheEntry> entry(
Access::StaticCast<CacheEntry>(entry_impl));
[[maybe_unused]] auto inserted = shard.entries.insert(entry_impl).second;
assert(inserted);
if (shard.entries.size() == 1) {
cache_impl->reference_count_.fetch_add(
CacheImpl::kNonEmptyShardIncrement, std::memory_order_relaxed);
}
StrongPtrTraitsCache::increment(cache);
returned_entry =
PinnedCacheEntry<Cache>(entry.release(), internal::adopt_object_ref);
}
}
auto* entry_impl = Access::StaticCast<CacheEntryImpl>(returned_entry.get());
absl::call_once(entry_impl->initialized_, [&] {
returned_entry->DoInitialize();
if (HasLruCache(cache_impl->pool_)) {
size_t new_size = entry_impl->num_bytes_ =
cache->DoGetSizeInBytes(returned_entry.get());
UpdateTotalBytes(*cache_impl->pool_, new_size);
}
});
return returned_entry;
}
void StrongPtrTraitsCache::decrement_impl(CacheImpl* cache) noexcept {
auto decrement_result =
DecrementCacheReferenceCount(cache, CacheImpl::kStrongReferenceIncrement);
CachePoolImpl* pool = nullptr;
if (decrement_result.should_release_cache_pool_weak_reference()) {
pool = cache->pool_;
}
if (decrement_result.should_delete()) {
DestroyCache(cache->pool_, cache);
}
if (pool) {
ReleaseWeakReference(pool);
}
}
CacheImpl::CacheImpl() : pool_(nullptr), reference_count_(0) {}
CacheImpl::~CacheImpl() = default;
void StrongPtrTraitsCachePool::increment(CachePool* p) noexcept {
auto* pool = Access::StaticCast<CachePoolImpl>(p);
if (pool->strong_references_.fetch_add(1, std::memory_order_acq_rel) == 0) {
AcquireWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
}
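// Dropping the last strong reference to the pool removes the pool's
// strong-reference contribution from every registered cache, destroying any
// cache left without references.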
void StrongPtrTraitsCachePool::decrement(CachePool* p) noexcept {
auto* pool = Access::StaticCast<CachePoolImpl>(p);
size_t new_count;
auto lock = DecrementReferenceCountWithLock(
pool->strong_references_,
[pool]() -> absl::Mutex& { return pool->caches_mutex_; }, new_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CachePool:decrement", p,
new_count);
if (!lock) return;
std::vector<CacheImpl*> caches;
caches.reserve(pool->caches_.size());
for (auto* cache : pool->caches_) {
if (DecrementCacheReferenceCount(
cache, CacheImpl::kCachePoolStrongReferenceIncrement)
.should_delete()) {
caches.push_back(cache);
}
}
lock.unlock();
for (auto* cache : caches) {
DestroyCache(pool, cache);
}
ReleaseWeakReference(pool);
}
void WeakPtrTraitsCachePool::increment(CachePool* p) noexcept {
AcquireWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
void WeakPtrTraitsCachePool::decrement(CachePool* p) noexcept {
ReleaseWeakReference(Access::StaticCast<CachePoolImpl>(p));
}
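// Releases one weak reference to a cache entry.  Dropping the last weak
// reference also drops its contribution to the entry's reference count, which
// may destroy the entry (no LRU pool) or queue it for eviction.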
void intrusive_ptr_decrement(CacheEntryWeakState* p)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
size_t new_weak_count;
auto weak_lock = DecrementReferenceCountWithLock(
p->weak_references, [p]() -> absl::Mutex& { return p->mutex; },
new_weak_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntryWeakState:decrement", p,
new_weak_count);
if (!weak_lock) return;
auto* entry = p->entry;
if (!entry) {
weak_lock = {};
delete p;
return;
}
uint32_t new_count;
auto* cache = entry->cache_;
auto* pool = cache->pool_;
ABSL_ASSUME(pool);
if (!HasLruCache(pool)) {
CacheImpl::Shard* shard = nullptr;
auto entries_lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[&]() -> absl::Mutex& {
shard = &cache->ShardForKey(entry->key_);
return shard->mutex;
},
new_count,
1, 0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", entry,
new_count);
weak_lock = {};
if (!entries_lock) return;
[[maybe_unused]] size_t erase_count = shard->entries.erase(entry);
assert(erase_count == 1);
bool should_delete_cache = false;
if (shard->entries.empty()) {
if (DecrementCacheReferenceCount(cache,
CacheImpl::kNonEmptyShardIncrement)
.should_delete()) {
should_delete_cache = true;
}
}
entries_lock = {};
delete Access::StaticCast<CacheEntry>(entry);
if (should_delete_cache) {
DestroyCache(pool, cache);
}
return;
}
auto pool_lock = DecrementReferenceCountWithLock(
entry->reference_count_,
[pool]() -> absl::Mutex& { return pool->lru_mutex_; }, new_count,
1,
0);
TENSORSTORE_INTERNAL_CACHE_DEBUG_REFCOUNT("CacheEntry:decrement", entry,
new_count);
if (!pool_lock) return;
weak_lock = {};
AddToEvictionQueue(pool, entry);
MaybeEvictEntries(pool);
}
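// Returns a weak reference to `entry_impl`, lazily allocating its weak state.
// Returns null for entries of caches without a pool.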
internal::IntrusivePtr<CacheEntryWeakState> AcquireWeakCacheEntryReference(
CacheEntryImpl* entry_impl) {
CacheEntryWeakState* weak_state =
entry_impl->weak_state_.load(std::memory_order_acquire);
if (!weak_state) {
if (!entry_impl->cache_->pool_) {
return {};
}
auto* new_weak_state = new CacheEntryWeakState;
new_weak_state->entry = entry_impl;
new_weak_state->weak_references.store(1, std::memory_order_relaxed);
if (entry_impl->weak_state_.compare_exchange_strong(
weak_state, new_weak_state, std::memory_order_acq_rel)) {
entry_impl->reference_count_.fetch_add(1, std::memory_order_relaxed);
return internal::IntrusivePtr<CacheEntryWeakState>(
new_weak_state, internal::adopt_object_ref);
} else {
delete new_weak_state;
}
}
if (weak_state->weak_references.fetch_add(1, std::memory_order_acq_rel) ==
0) {
entry_impl->reference_count_.fetch_add(1, std::memory_order_relaxed);
}
return internal::IntrusivePtr<CacheEntryWeakState>(
weak_state, internal::adopt_object_ref);
}
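// Adjusts the pool's running byte total, evicting entries if a positive
// change pushed it over `total_bytes_limit`.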
void UpdateTotalBytes(CachePoolImpl& pool, ptrdiff_t change) {
assert(HasLruCache(&pool));
if (pool.total_bytes_.fetch_add(change, std::memory_order_acq_rel) + change <=
pool.limits_.total_bytes_limit ||
change <= 0) {
return;
}
absl::MutexLock lock(&pool.lru_mutex_);
MaybeEvictEntries(&pool);
}
}
namespace internal {
Cache::Cache() = default;
Cache::~Cache() = default;
size_t Cache::DoGetSizeInBytes(Cache::Entry* entry) {
  auto* entry_impl =
      internal_cache::Access::StaticCast<internal_cache::CacheEntryImpl>(
          entry);
  return entry_impl->key_.capacity() + this->DoGetSizeofEntry();
}
CacheEntry::~CacheEntry() {
auto* weak_state = this->weak_state_.load(std::memory_order_relaxed);
if (!weak_state) return;
{
absl::MutexLock lock(&weak_state->mutex);
weak_state->entry = nullptr;
if (weak_state->weak_references.load(std::memory_order_acquire) != 0) {
return;
}
}
delete weak_state;
}
void CacheEntry::DoInitialize() {}
void CacheEntry::WriterLock() { mutex_.WriterLock(); }
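// On unlock, if the entry was marked as size-changed, recomputes its size and
// updates the pool's byte accounting (outside the entry lock).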
void CacheEntry::WriterUnlock() {
UniqueWriterLock lock(mutex_, std::adopt_lock);
auto flags = std::exchange(flags_, 0);
if (!flags) return;
assert(flags & kSizeChanged);
auto& cache = GetOwningCache(*this);
auto* pool = cache.pool();
auto* pool_impl =
internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(pool);
if (!internal_cache::HasLruCache(pool_impl)) return;
const size_t new_size = cache.DoGetSizeInBytes(this);
ptrdiff_t change = new_size - std::exchange(num_bytes_, new_size);
lock.unlock();
internal_cache::UpdateTotalBytes(*pool_impl, change);
}
CachePool::StrongPtr CachePool::Make(const CachePool::Limits& cache_limits) {
CachePool::StrongPtr pool;
internal_cache::Access::StaticCast<internal_cache::CachePoolStrongPtr>(&pool)
->reset(new internal_cache::CachePool(cache_limits), adopt_object_ref);
return pool;
}
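// Converting a weak pool pointer back to a strong one revives the pool: on
// the 0 -> 1 strong-reference transition, every registered cache regains its
// pool strong-reference increment.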
CachePool::StrongPtr::StrongPtr(const CachePool::WeakPtr& ptr)
: Base(ptr.get(), adopt_object_ref) {
if (!ptr) return;
auto* pool =
internal_cache::Access::StaticCast<internal_cache::CachePoolImpl>(
ptr.get());
absl::MutexLock lock(&pool->caches_mutex_);
if (pool->strong_references_.fetch_add(1, std::memory_order_acq_rel) == 0) {
internal_cache::AcquireWeakReference(pool);
for (auto* cache : pool->caches_) {
cache->reference_count_.fetch_add(
internal_cache::CacheImpl::kCachePoolStrongReferenceIncrement,
std::memory_order_acq_rel);
}
}
}
}
} | #include "tensorstore/internal/cache/cache.h"
#include <stddef.h>
#include <atomic>
#include <deque>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/testing/concurrent.h"
namespace {
using ::tensorstore::UniqueWriterLock;
using ::tensorstore::internal::Cache;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::CachePtr;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::WeakPinnedCacheEntry;
using ::tensorstore::internal_cache::Access;
using ::tensorstore::internal_cache::CacheEntryImpl;
using ::tensorstore::internal_cache::CacheImpl;
using ::tensorstore::internal_cache::CachePoolImpl;
using ::tensorstore::internal_cache::LruListNode;
using ::tensorstore::internal_testing::TestConcurrent;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
CachePoolImpl* GetPoolImpl(const CachePool::StrongPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
CachePoolImpl* GetPoolImpl(const CachePool::WeakPtr& ptr) {
return Access::StaticCast<CachePoolImpl>(ptr.get());
}
class TestCache : public Cache {
public:
struct RequestLog {
absl::Mutex mutex;
std::deque<std::string> entry_allocate_log;
std::deque<std::pair<std::string, std::string>> entry_destroy_log;
std::deque<std::string> cache_allocate_log;
std::deque<std::string> cache_destroy_log;
};
class Entry : public Cache::Entry {
public:
using OwningCache = TestCache;
std::string data;
size_t size = 1;
void ChangeSize(size_t new_size) {
UniqueWriterLock<Cache::Entry> lock(*this);
size = new_size;
NotifySizeChanged();
}
~Entry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_destroy_log.emplace_back(cache_identifier_,
std::string(this->key()));
}
}
WeakPinnedCacheEntry weak_ref;
std::shared_ptr<RequestLog> log_;
std::string cache_identifier_;
};
explicit TestCache(std::shared_ptr<RequestLog> log = {}) : log_(log) {}
~TestCache() {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->cache_destroy_log.emplace_back(cache_identifier());
}
}
size_t DoGetSizeofEntry() override { return sizeof(Entry); }
Entry* DoAllocateEntry() override {
if (log_) {
absl::MutexLock lock(&log_->mutex);
log_->entry_allocate_log.emplace_back(cache_identifier());
}
auto* entry = new Entry;
entry->cache_identifier_ = cache_identifier();
entry->log_ = log_;
return entry;
}
void OnDelete(Entry* entry) {}
size_t DoGetSizeInBytes(Cache::Entry* base_entry) override {
auto* entry = static_cast<Entry*>(base_entry);
return entry->size;
}
std::shared_ptr<RequestLog> log_;
};
class TestCacheWithCachePool : public TestCache {
public:
using TestCache::TestCache;
CachePool::WeakPtr cache_pool;
};
using EntryIdentifier = std::pair<std::string, void*>;
std::pair<std::string, void*> GetEntryIdentifier(CacheEntryImpl* entry) {
return {entry->key_, entry};
}
absl::flat_hash_set<EntryIdentifier> GetEntrySet(LruListNode* head) {
absl::flat_hash_set<EntryIdentifier> entries;
for (LruListNode* node = head->next; node != head; node = node->next) {
entries.emplace(
GetEntryIdentifier(Access::StaticCast<CacheEntryImpl>(node)));
}
return entries;
}
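// Checks pool invariants: every registered cache is expected, the pool's weak
// reference count matches the number of live caches, per-entry byte
// accounting agrees with DoGetSizeInBytes, and every unpinned entry appears
// in the eviction queue.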
void AssertInvariants(const CachePool::StrongPtr& pool,
absl::flat_hash_set<Cache*> expected_caches)
ABSL_NO_THREAD_SAFETY_ANALYSIS {
auto* pool_impl = GetPoolImpl(pool);
auto eviction_queue_entries = GetEntrySet(&pool_impl->eviction_queue_);
absl::flat_hash_set<EntryIdentifier> expected_eviction_queue_entries;
size_t expected_total_bytes = 0;
for (auto* cache : pool_impl->caches_) {
EXPECT_EQ(pool_impl, cache->pool_);
EXPECT_NE("", cache->cache_identifier_);
EXPECT_EQ(1, expected_caches.count(Access::StaticCast<Cache>(cache)));
}
EXPECT_EQ(1 + expected_caches.size(), pool_impl->weak_references_.load());
for (auto* cache : expected_caches) {
auto* cache_impl = Access::StaticCast<CacheImpl>(cache);
if (!cache_impl->cache_identifier_.empty()) {
auto it = pool_impl->caches_.find(cache_impl);
ASSERT_NE(it, pool_impl->caches_.end());
EXPECT_EQ(cache_impl, *it);
}
if (pool_impl->limits_.total_bytes_limit != 0) {
for (auto& shard : cache_impl->shards_) {
for (CacheEntryImpl* entry : shard.entries) {
EXPECT_EQ(
entry->num_bytes_,
cache->DoGetSizeInBytes(Access::StaticCast<Cache::Entry>(entry)));
expected_total_bytes += entry->num_bytes_;
if (entry->reference_count_.load() == 0) {
expected_eviction_queue_entries.emplace(GetEntryIdentifier(entry));
}
}
}
}
}
EXPECT_EQ(expected_total_bytes, pool_impl->total_bytes_);
EXPECT_THAT(expected_eviction_queue_entries,
::testing::IsSubsetOf(eviction_queue_entries));
}
#define TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(...) \
do { \
SCOPED_TRACE(""); \
AssertInvariants(__VA_ARGS__); \
} while (false)
template <typename CacheType = TestCache>
CachePtr<CacheType> GetTestCache(
CachePool* pool, std::string cache_identifier,
std::shared_ptr<TestCache::RequestLog> log = {}) {
return GetCache<CacheType>(pool, cache_identifier, [&] {
if (log) {
absl::MutexLock lock(&log->mutex);
log->cache_allocate_log.emplace_back(cache_identifier);
}
return std::make_unique<CacheType>(log);
});
}
TEST(CachePoolTest, GetCacheEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(pool.get(), "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
auto test_cache2 = GetTestCache(nullptr, "", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_allocate_log, ElementsAre("", ""));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNonEmptyKey) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
}
TEST(CachePoolTest, GetCacheNonEmptyKeyCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
{
auto test_cache1 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
auto test_cache2 = GetTestCache(nullptr, "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x", "x"));
EXPECT_NE(test_cache1, test_cache2);
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("", ""));
}
TEST(CachePoolTest, GetCacheNullptr) {
auto pool = CachePool::Make(CachePool::Limits{10000});
int make_cache_calls = 0;
auto make_cache = [&] {
++make_cache_calls;
return nullptr;
};
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(1, make_cache_calls);
}
{
auto cache = GetCache<TestCache>(pool.get(), "x", make_cache);
EXPECT_EQ(nullptr, cache);
EXPECT_EQ(2, make_cache_calls);
}
}
TEST(CachePoolTest, GetCacheNonEmptyKeyNoReferences) {
auto pool = CachePool::Make(CachePool::Limits{});
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
{
auto pool2 = pool;
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->strong_references_.load());
}
{
auto test_cache1 = GetTestCache(pool.get(), "x", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->caches_.size());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, test_cache1->use_count());
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("x"));
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(0, GetPoolImpl(pool)->caches_.size());
}
TEST(CachePoolTest, StrongToWeakToStrong) {
CachePool::StrongPtr strong_ptr = CachePool::Make({});
CachePool::WeakPtr weak_ptr(strong_ptr);
strong_ptr = CachePool::StrongPtr();
strong_ptr = CachePool::StrongPtr(weak_ptr);
weak_ptr = CachePool::WeakPtr();
}
class NamedOrAnonymousCacheTest : public ::testing::TestWithParam<const char*> {
public:
std::shared_ptr<TestCache::RequestLog> log =
std::make_shared<TestCache::RequestLog>();
std::string cache_key = GetParam();
CachePtr<TestCache> GetCache(const CachePool::StrongPtr& pool) {
return GetTestCache(pool.get(), cache_key, log);
}
};
INSTANTIATE_TEST_SUITE_P(WithoutCacheKey, NamedOrAnonymousCacheTest,
::testing::Values(""));
INSTANTIATE_TEST_SUITE_P(WithCacheKey, NamedOrAnonymousCacheTest,
::testing::Values("k"));
TEST_P(NamedOrAnonymousCacheTest, CacheEntryKeepsCacheAlive) {
{
PinnedCacheEntry<TestCache> entry;
{
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_THAT(log->cache_allocate_log, ElementsAre(cache_key));
entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
}
EXPECT_EQ(1, GetOwningCache(*entry).use_count());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST_P(NamedOrAnonymousCacheTest, GetWithImmediateEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
EXPECT_EQ(1, test_cache->use_count());
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_EQ(2, test_cache->use_count());
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
EXPECT_EQ(1, e->use_count());
{
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(2, test_cache->use_count());
EXPECT_EQ(2, e2->use_count());
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, e->use_count());
EXPECT_EQ(2, test_cache->use_count());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
EXPECT_EQ(1, test_cache->use_count());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
EXPECT_EQ("", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair(cache_key, "a"), Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TEST_P(NamedOrAnonymousCacheTest, GetWithoutImmediateEvict) {
{
auto pool = CachePool::Make(kSmallCacheLimits);
auto test_cache = GetCache(pool);
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key));
EXPECT_EQ("value", e1->data);
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key, cache_key));
e2->data = "value2";
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e1 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value", e1->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
{
auto e2 = GetCacheEntry(test_cache, "b");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre(cache_key, cache_key));
EXPECT_EQ("value2", e2->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a"), Pair(cache_key, "b")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre(cache_key));
}
TEST(CacheTest, NamedGetWithoutImmediateEvict) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto log = std::make_shared<TestCache::RequestLog>();
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
e->data = "value";
auto e2 = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ(e, e2);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
{
auto test_cache = GetTestCache(pool.get(), "cache");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
{
auto e = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache"));
EXPECT_EQ("value", e->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
}
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeThenEvict) {
auto pool = CachePool::Make(CachePool::Limits{});
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre(cache_key));
entry->data = "a";
entry->ChangeSize(5000);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
}
TEST_P(NamedOrAnonymousCacheTest, UpdateSizeNoEvict) {
CachePool::Limits limits;
limits.total_bytes_limit = 10000;
auto pool = CachePool::Make(limits);
auto test_cache = GetCache(pool);
{
auto entry = GetCacheEntry(test_cache, "a");
entry->data = "a";
entry->ChangeSize(1);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
{
auto entry = GetCacheEntry(test_cache, "b");
entry->data = "b";
entry->ChangeSize(5000);
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("a", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
GetCacheEntry(test_cache, "c")->data = "c";
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("", GetCacheEntry(test_cache, "a")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("b", GetCacheEntry(test_cache, "b")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_EQ("c", GetCacheEntry(test_cache, "c")->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {test_cache.get()});
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair(cache_key, "a")));
}
TEST(CacheTest, CacheDependsOnOtherCache) {
class CacheA : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
};
class CacheB : public tensorstore::internal::Cache {
using Base = tensorstore::internal::Cache;
public:
class Entry : public Cache::Entry {};
using Base::Base;
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
CachePtr<CacheA> cache_a;
};
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache_a = GetCache<CacheA>(pool.get(), "x",
[&] { return std::make_unique<CacheA>(); });
auto cache_b = GetCache<CacheB>(pool.get(), "x",
[&] { return std::make_unique<CacheB>(); });
GetCacheEntry(cache_b, "key");
cache_b->cache_a = cache_a;
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool,
{cache_a.get(), cache_b.get()});
}
constexpr static int kDefaultIterations = 100;
TEST(CacheTest, ConcurrentGetCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(2, cache->use_count());
for (auto& e : pinned_entries) {
e.reset();
}
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
},
[&] { pinned_entries[0] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[1] = GetCacheEntry(cache, "a"); },
[&] { pinned_entries[2] = GetCacheEntry(cache, "a"); });
}
TEST(CacheTest, ConcurrentGetCacheEntryWeakReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
TestConcurrent(
kDefaultIterations,
[&] { entry = GetCacheEntry(cache, "a"); },
[&] {},
[&] { entry->AcquireWeakReference(); },
[&] { entry->AcquireWeakReference(); });
}
TEST(CacheTest,
ConcurrentDestroyStrongAndWeakCacheEntryReferenceCacheDisabled) {
auto cache = GetTestCache(nullptr, "cache");
PinnedCacheEntry<TestCache> entry;
WeakPinnedCacheEntry weak_ref;
TestConcurrent(
kDefaultIterations,
[&] {
entry = GetCacheEntry(cache, "a");
weak_ref = entry->AcquireWeakReference();
},
[&] {},
[&] { entry = {}; }, [&] { weak_ref = {}; });
}
TEST(CacheTest, ConcurrentGetCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[] {},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {caches[0].get(), caches[1].get(), caches[2].get()});
size_t use_count = 3;
for (auto& cache : caches) {
EXPECT_EQ(use_count, cache->use_count());
cache.reset();
--use_count;
}
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0] = GetTestCache(pool.get(), "cache"); },
[&] { caches[1] = GetTestCache(pool.get(), "cache"); },
[&] { caches[2] = GetTestCache(pool.get(), "cache"); });
}
TEST(CacheTest, ConcurrentReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
CachePtr<TestCache> caches[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
for (auto& cache : caches) {
cache = GetTestCache(pool.get(), "cache");
}
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] { caches[0].reset(); }, [&] { caches[1].reset(); },
[&] { caches[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCache) {
auto pool = CachePool::Make(kSmallCacheLimits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "cache");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
PinnedCacheEntry<TestCache> pinned_entries[3];
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
for (auto& e : pinned_entries) {
e = GetCacheEntry(cache, "a");
}
EXPECT_EQ(2, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, cache->use_count());
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
},
[&] { pinned_entries[0].reset(); }, [&] { pinned_entries[1].reset(); },
[&] { pinned_entries[2].reset(); });
}
TEST(CacheTest, ConcurrentGetReleaseCacheEntry) {
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache");
const auto concurrent_op = [&] {
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
[&] {
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache.get()});
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_EQ(1, cache->use_count());
},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, ConcurrentDestroyCacheEvictEntries) {
CachePool::Limits limits = {};
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
const auto concurrent_op = [&] {
auto cache = GetTestCache(pool.get(), "");
auto entry = GetCacheEntry(cache, "a");
};
TestConcurrent(
kDefaultIterations,
[&] {},
[&] {},
concurrent_op, concurrent_op, concurrent_op);
}
TEST(CacheTest, EvictEntryDestroyCache) {
auto log = std::make_shared<TestCache::RequestLog>();
CachePool::Limits limits;
limits.total_bytes_limit = 1;
auto pool = CachePool::Make(limits);
auto cache_b = GetTestCache(pool.get(), "cache_b", log);
{
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_b", "cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_b", "cache_a"));
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
ASSERT_EQ("entry_a", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
auto entry_b = GetCacheEntry(cache_b, "entry_b");
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache_a", "entry_a")));
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a"));
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(pool, {cache_b.get()});
{
auto cache_a = GetTestCache(pool.get(), "cache_a");
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_EQ("", entry_a->data);
TENSORSTORE_INTERNAL_ASSERT_CACHE_INVARIANTS(
pool, {cache_a.get(), cache_b.get()});
}
}
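// Exercises strong/weak pool pointer transitions: caches created while the
// pool has no strong references are destroyed as soon as their last user
// releases them, and reviving the pool restores retention of named caches.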
TEST(CacheTest, CachePoolWeakPtr) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(1, GetPoolImpl(pool)->weak_references_.load());
auto cache_a = GetTestCache(pool.get(), "cache_a", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
EXPECT_EQ(1, GetPoolImpl(pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
auto cache_b = GetTestCache(pool.get(), "cache_b", log);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a", "cache_b"));
auto entry_b = GetCacheEntry(cache_b, "entry_b");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a", "cache_b"));
entry_b->data = "entry_b";
EXPECT_EQ(3, GetPoolImpl(pool)->weak_references_.load());
cache_a.reset();
entry_a.reset();
EXPECT_EQ(2, GetPoolImpl(pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
CachePool::WeakPtr weak_pool(pool);
EXPECT_EQ(1, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
{
CachePool::StrongPtr strong_pool(pool);
EXPECT_EQ(2, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
}
EXPECT_EQ(1, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(weak_pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre());
pool = {};
EXPECT_EQ(0, GetPoolImpl(weak_pool)->strong_references_.load());
EXPECT_EQ(2, GetPoolImpl(weak_pool)->weak_references_.load());
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a"));
{
auto cache_c = GetTestCache(weak_pool.get(), "cache_c", log);
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c"));
auto entry_c = GetCacheEntry(cache_c, "entry_c");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c"));
entry_c->data = "entry_c";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a", "cache_c"));
CachePool::StrongPtr strong_pool(weak_pool);
EXPECT_EQ(1, GetPoolImpl(strong_pool)->strong_references_.load());
EXPECT_EQ(3, GetPoolImpl(strong_pool)->weak_references_.load());
{
auto cache_d = GetTestCache(strong_pool.get(), "cache_d", log);
EXPECT_THAT(log->cache_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c", "cache_d"));
auto entry_d = GetCacheEntry(cache_d, "entry_d");
EXPECT_THAT(log->entry_allocate_log,
ElementsAre("cache_a", "cache_b", "cache_c", "cache_d"));
entry_d->data = "entry_d";
}
EXPECT_THAT(log->cache_destroy_log, ElementsAre("cache_a", "cache_c"));
}
TEST(CacheTest, TestCacheWithCachePool) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
{
auto cache_a =
GetTestCache<TestCacheWithCachePool>(pool.get(), "cache_a", log);
cache_a->cache_pool = CachePool::WeakPtr(pool);
EXPECT_THAT(log->cache_allocate_log, ElementsAre("cache_a"));
auto entry_a = GetCacheEntry(cache_a, "entry_a");
EXPECT_THAT(log->entry_allocate_log, ElementsAre("cache_a"));
entry_a->data = "entry_a";
}
}
TEST(CacheTest, EntryWeakReference) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, EntryWeakReferenceCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
}
TEST(CacheTest, EntryWeakReferencesCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
}
TEST(CacheTest, EntryWeakReferences) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
auto weak_ref2 = entry_a->AcquireWeakReference();
auto entry_a2 = GetCacheEntry(cache, "a");
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref2 = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
entry_a2 = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, GetStrongEntryReferenceWhileHoldingOnlyWeakReference) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(CachePool::Limits{});
auto cache = GetTestCache(pool.get(), "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
entry_a = GetCacheEntry(cache, "a");
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest,
GetStrongEntryReferenceWhileHoldingOnlyWeakReferenceCacheDisabled) {
auto log = std::make_shared<TestCache::RequestLog>();
auto cache = GetTestCache(nullptr, "cache", log);
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
entry_a = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("", "a")));
entry_a = GetCacheEntry(cache, "a");
entry_a = {};
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair("", "a"), Pair("", "a")));
weak_ref = {};
EXPECT_THAT(log->entry_destroy_log,
ElementsAre(Pair("", "a"), Pair("", "a")));
}
TEST(CacheTest, PoolWithNonZeroBytesLimit) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache = GetTestCache(pool.get(), "cache", log);
{
auto entry_a = GetCacheEntry(cache, "a");
auto weak_ref = entry_a->AcquireWeakReference();
}
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
cache = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre(Pair("cache", "a")));
}
TEST(CacheTest, WeakRefOwnedByEntry) {
auto log = std::make_shared<TestCache::RequestLog>();
auto pool = CachePool::Make(kSmallCacheLimits);
auto cache1 = GetTestCache(pool.get(), "cache1", log);
auto cache2 = GetTestCache(pool.get(), "cache2", log);
{
auto entry_a = GetCacheEntry(cache1, "a");
auto entry_b = GetCacheEntry(cache1, "b");
entry_a->weak_ref = entry_b->AcquireWeakReference();
}
{ auto entry_c = GetCacheEntry(cache2, "c"); }
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
pool = {};
EXPECT_THAT(log->entry_destroy_log, ElementsAre());
cache1 = {};
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair("cache1", "a"), Pair("cache1", "b")));
cache2 = {};
EXPECT_THAT(log->entry_destroy_log,
UnorderedElementsAre(Pair("cache1", "a"), Pair("cache1", "b"),
Pair("cache2", "c")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d80a51ca-5726-403e-8f99-66364c3b3663 | cpp | google/tensorstore | utils | tensorstore/internal/grpc/utils.cc | tensorstore/internal/grpc/utils_test.cc | #include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#define TENSORSTORE_STATUS_ASSERT(x, y) \
static_assert(static_cast<int>(grpc::StatusCode::x) == \
static_cast<int>(absl::StatusCode::y))
TENSORSTORE_STATUS_ASSERT(CANCELLED, kCancelled);
TENSORSTORE_STATUS_ASSERT(UNKNOWN, kUnknown);
TENSORSTORE_STATUS_ASSERT(INVALID_ARGUMENT, kInvalidArgument);
TENSORSTORE_STATUS_ASSERT(DEADLINE_EXCEEDED, kDeadlineExceeded);
TENSORSTORE_STATUS_ASSERT(NOT_FOUND, kNotFound);
TENSORSTORE_STATUS_ASSERT(ALREADY_EXISTS, kAlreadyExists);
TENSORSTORE_STATUS_ASSERT(PERMISSION_DENIED, kPermissionDenied);
TENSORSTORE_STATUS_ASSERT(RESOURCE_EXHAUSTED, kResourceExhausted);
TENSORSTORE_STATUS_ASSERT(FAILED_PRECONDITION, kFailedPrecondition);
TENSORSTORE_STATUS_ASSERT(ABORTED, kAborted);
TENSORSTORE_STATUS_ASSERT(OUT_OF_RANGE, kOutOfRange);
TENSORSTORE_STATUS_ASSERT(UNIMPLEMENTED, kUnimplemented);
TENSORSTORE_STATUS_ASSERT(INTERNAL, kInternal);
TENSORSTORE_STATUS_ASSERT(UNAVAILABLE, kUnavailable);
TENSORSTORE_STATUS_ASSERT(DATA_LOSS, kDataLoss);
TENSORSTORE_STATUS_ASSERT(UNAUTHENTICATED, kUnauthenticated);
#undef TENSORSTORE_STATUS_ASSERT
namespace tensorstore {
namespace internal {
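// Converts a grpc::Status to an absl::Status. The static_asserts above
// verify that the two StatusCode enums share numeric values, which makes the
// static_cast below safe; any gRPC error details are preserved as a
// "grpc.Status.details" payload on the returned status.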
absl::Status GrpcStatusToAbslStatus(grpc::Status s, SourceLocation loc) {
if (s.ok()) return absl::OkStatus();
auto absl_code = static_cast<absl::StatusCode>(s.error_code());
absl::Status status(absl_code, s.error_message());
MaybeAddSourceLocation(status, loc);
if (!s.error_details().empty()) {
status.SetPayload("grpc.Status.details", absl::Cord(s.error_details()));
}
return status;
}
grpc::Status AbslStatusToGrpcStatus(const absl::Status& status) {
if (status.ok()) return grpc::Status::OK;
auto grpc_code = static_cast<grpc::StatusCode>(status.code());
return grpc::Status(grpc_code, std::string(status.message()));
}
}
} | #include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
namespace {
using ::tensorstore::internal::AbslStatusToGrpcStatus;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
TEST(StatusToGrpcStatus, Basic) {
EXPECT_EQ(grpc::Status::OK.error_code(),
AbslStatusToGrpcStatus(absl::OkStatus()).error_code());
}
TEST(GrpcStatusToStatus, Basic) {
EXPECT_EQ(absl::OkStatus(), GrpcStatusToAbslStatus(grpc::Status::OK));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/utils.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/utils_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ae4651cb-fc9e-4e95-b1e1-69875c74e05a | cpp | google/tensorstore | client_credentials | tensorstore/internal/grpc/client_credentials.cc | tensorstore/internal/grpc/client_credentials_test.cc | #include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
namespace tensorstore {
namespace {
ABSL_CONST_INIT static absl::Mutex credentials_mu(absl::kConstInit);
const internal::ContextResourceRegistration<GrpcClientCredentials>
grpc_client_credentials_registration;
}
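// Installs `credentials` on the GrpcClientCredentials resource of `context`.
// Returns true only if no credentials had been set previously. Access is
// serialized by a process-wide mutex since the resource may be shared across
// threads.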
bool GrpcClientCredentials::Use(
tensorstore::Context context,
std::shared_ptr<::grpc::ChannelCredentials> credentials) {
auto resource = context.GetResource<GrpcClientCredentials>().value();
absl::MutexLock l(&credentials_mu);
bool result = (resource->credentials_ == nullptr);
resource->credentials_ = std::move(credentials);
return result;
}
std::shared_ptr<::grpc::ChannelCredentials>
GrpcClientCredentials::Resource::GetCredentials() {
absl::MutexLock l(&credentials_mu);
if (credentials_) return credentials_;
return grpc::InsecureChannelCredentials();
}
} | #include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <gtest/gtest.h>
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::GrpcClientCredentials;
TEST(GrpcClientCredentials, Use) {
auto use = grpc::experimental::LocalCredentials(LOCAL_TCP);
auto ctx = tensorstore::Context::Default();
EXPECT_TRUE(GrpcClientCredentials::Use(ctx, use));
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_EQ(a.get(), use.get());
}
TEST(GrpcClientCredentials, Default) {
auto ctx = tensorstore::Context::Default();
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
auto b = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_NE(a.get(), b.get());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/client_credentials.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/client_credentials_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6c5b0579-7cd8-435d-8637-831145e2962d | cpp | google/tensorstore | server_credentials | tensorstore/internal/grpc/server_credentials.cc | tensorstore/internal/grpc/server_credentials_test.cc | #include "tensorstore/internal/grpc/server_credentials.h"
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace {
ABSL_CONST_INIT static absl::Mutex credentials_mu(absl::kConstInit);
const internal::ContextResourceRegistration<GrpcServerCredentials>
grpc_server_credentials_registration;
}
bool GrpcServerCredentials::Use(
tensorstore::Context context,
std::shared_ptr<::grpc::ServerCredentials> credentials) {
auto resource = context.GetResource<GrpcServerCredentials>().value();
absl::MutexLock l(&credentials_mu);
bool result = (resource->credentials_ == nullptr);
resource->credentials_ = std::move(credentials);
return result;
}
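// Returns the installed credentials, falling back to insecure server
// credentials when none have been set.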
std::shared_ptr<::grpc::ServerCredentials>
GrpcServerCredentials::Resource::GetCredentials() {
absl::MutexLock l(&credentials_mu);
if (credentials_) return credentials_;
return grpc::InsecureServerCredentials();
}
} | #include "tensorstore/internal/grpc/server_credentials.h"
#include <gtest/gtest.h>
#include "grpcpp/security/server_credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::GrpcServerCredentials;
TEST(GrpcServerCredentials, Use) {
auto use = grpc::experimental::LocalServerCredentials(LOCAL_TCP);
auto ctx = tensorstore::Context::Default();
EXPECT_TRUE(GrpcServerCredentials::Use(ctx, use));
auto a = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
EXPECT_EQ(a.get(), use.get());
}
TEST(GrpcServerCredentials, Default) {
auto ctx = tensorstore::Context::Default();
auto a = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
auto b = ctx.GetResource<GrpcServerCredentials>().value()->GetCredentials();
EXPECT_NE(a.get(), b.get());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/server_credentials.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/server_credentials_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b3a54a4f-5588-4272-a712-c729944c2433 | cpp | google/tensorstore | sha256 | tensorstore/internal/digest/sha256.cc | tensorstore/internal/digest/sha256_test.cc | #include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include "absl/strings/cord.h"
namespace tensorstore {
namespace internal {
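// Feeds every chunk of `cord` into the digest. Hashing chunk-by-chunk is
// equivalent to hashing the flattened byte sequence, so no contiguous copy
// of the cord is materialized.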
void SHA256Digester::Write(const absl::Cord& cord) {
for (std::string_view chunk : cord.Chunks()) {
Write(chunk);
}
}
}
} | #include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
using ::tensorstore::internal::SHA256Digester;
namespace {
TEST(Sha256Digest, Basic) {
auto digest = [](auto input) {
SHA256Digester digester;
digester.Write(input);
auto digest = digester.Digest();
return absl::BytesToHexString(std::string_view(
reinterpret_cast<char*>(digest.data()), digest.size()));
};
EXPECT_THAT(
digest(std::string_view("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
EXPECT_THAT(
digest(absl::Cord("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/digest/sha256.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/digest/sha256_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
22592641-0513-4ddb-bc56-908b24bd89c8 | cpp | google/tensorstore | gce_auth_provider | tensorstore/internal/oauth2/gce_auth_provider.cc | tensorstore/internal/oauth2/gce_auth_provider_test.cc | #include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <functional>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
ABSL_FLAG(std::optional<std::string>, tensorstore_gce_metadata_root,
std::nullopt,
"Url to used for http access metadata.google.internal. "
"Overrides GCE_METADATA_ROOT.");
namespace tensorstore {
namespace internal_oauth2 {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal::GetFlagOrEnvValue;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr static auto ServiceAccountInfoBinder = jb::Object(
jb::Member("email",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::email,
jb::NonEmptyStringBinder)),
jb::Member("scopes",
jb::Projection(&GceAuthProvider::ServiceAccountInfo::scopes)),
jb::DiscardExtraMembers);
}
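// Resolves the metadata server hostname. The --tensorstore_gce_metadata_root
// flag takes precedence, then the GCE_METADATA_ROOT environment variable,
// then the standard "metadata.google.internal" default.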
std::string GceMetadataHostname() {
return GetFlagOrEnvValue(FLAGS_tensorstore_gce_metadata_root,
"GCE_METADATA_ROOT")
.value_or("metadata.google.internal");
}
GceAuthProvider::GceAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport,
const ServiceAccountInfo& service_account_info,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
service_account_email_(service_account_info.email),
scopes_(service_account_info.scopes.begin(),
service_account_info.scopes.end()),
transport_(std::move(transport)) {}
Result<HttpResponse> GceAuthProvider::IssueRequest(std::string path,
bool recursive) {
HttpRequestBuilder request_builder(
"GET", internal::JoinPath("http:
request_builder.AddHeader("Metadata-Flavor: Google");
if (recursive) {
request_builder.AddQueryParameter("recursive", "true");
}
return transport_->IssueRequest(request_builder.BuildRequest(), {}).result();
}
Result<GceAuthProvider::ServiceAccountInfo>
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
internal_http::HttpTransport* transport) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
transport
->IssueRequest(
HttpRequestBuilder(
"GET",
internal::JoinPath(
"http:
"/computeMetadata/v1/instance/service-accounts/default/"))
.AddHeader("Metadata-Flavor: Google")
.AddQueryParameter("recursive", "true")
.BuildRequest(),
{})
.result());
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
auto info_response = internal::ParseJson(response.payload.Flatten());
if (info_response.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Failed to parse service account response: ",
response.payload.Flatten()));
}
return jb::FromJson<ServiceAccountInfo>(info_response,
ServiceAccountInfoBinder);
}
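// Fetches a fresh OAuth2 bearer token from the metadata server's
// service-account token endpoint and converts the relative "expires_in"
// field into an absolute expiration time.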
Result<BearerTokenWithExpiration> GceAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto response,
IssueRequest(
tensorstore::StrCat("/computeMetadata/v1/instance/service-accounts/",
service_account_email_, "/token"),
false));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GceAuthProvider;
const char kOAuthResponse[] = R"(
{
"token_type" : "refresh",
"access_token": "abc",
"expires_in": 456
}
)";
class TestAuthProvider : public GceAuthProvider {
public:
TestAuthProvider()
: GceAuthProvider(nullptr, {"nobody@nowhere.com", {"abc", "xyz"}},
[this] { return this->time; }),
time(absl::Now()),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string path, bool recursive) {
request.emplace_back(std::move(path));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::string> request;
};
TEST(GceAuthProviderTest, InitialState) {
TestAuthProvider auth;
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GceAuthProviderTest, Status200) {
TestAuthProvider auth;
auth.responses = {
{0, {200, absl::Cord(kOAuthResponse), {}}},
{1, {200, absl::Cord(kOAuthResponse), {}}},
};
EXPECT_FALSE(auth.IsValid());
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
TEST(GceAuthProviderTest, NoResponse) {
TestAuthProvider auth;
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ(
"/computeMetadata/v1/instance/service-accounts/nobody@nowhere.com/token",
auth.request[0]);
}
TEST(GceAuthProviderTest, Status400) {
TestAuthProvider auth;
auth.responses = {
{0, {400, absl::Cord(kOAuthResponse), {}}},
};
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_FALSE(result.ok()) << result.status();
}
TEST(GceAuthProviderTest, Hostname) {
EXPECT_EQ("metadata.google.internal",
tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::SetEnv("GCE_METADATA_ROOT", "localhost");
EXPECT_EQ("localhost", tensorstore::internal_oauth2::GceMetadataHostname());
tensorstore::internal::UnsetEnv("GCE_METADATA_ROOT");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/gce_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/gce_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
52112969-3335-4fe7-9ed1-cd823a39f211 | cpp | google/tensorstore | oauth2_auth_provider | tensorstore/internal/oauth2/oauth2_auth_provider.cc | tensorstore/internal/oauth2/oauth2_auth_provider_test.cc | #include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
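// Builds the application/x-www-form-urlencoded body for a refresh_token
// grant; each credential component is percent-encoded before being
// concatenated into the POST payload.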
std::string MakePayload(const internal_oauth2::RefreshToken& creds) {
auto client_id = internal::PercentEncodeUriComponent(creds.client_id);
auto client_secret = internal::PercentEncodeUriComponent(creds.client_secret);
auto refresh_token = internal::PercentEncodeUriComponent(creds.refresh_token);
return tensorstore::StrCat(
"grant_type=refresh_token", "&client_id=", client_id,
"&client_secret=", client_secret, "&refresh_token=", refresh_token);
}
}
OAuth2AuthProvider::OAuth2AuthProvider(
const RefreshToken& creds, std::string uri,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
refresh_payload_(MakePayload(creds)),
uri_(std::move(uri)),
transport_(std::move(transport)) {}
Result<HttpResponse> OAuth2AuthProvider::IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord payload) {
return transport_
->IssueRequest(
HttpRequestBuilder(method, std::string{uri}).BuildRequest(),
internal_http::IssueRequestOptions(std::move(payload)))
.result();
}
Result<BearerTokenWithExpiration> OAuth2AuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto response, IssueRequest("POST", uri_, absl::Cord(refresh_payload_)));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::OAuth2AuthProvider;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
constexpr char kOAuthV3Url[] = "https:
class TestAuthProvider : public OAuth2AuthProvider {
public:
TestAuthProvider(const RefreshToken& creds)
: OAuth2AuthProvider(creds, kOAuthV3Url, nullptr,
[this] { return this->time; }),
time(absl::Now()),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord body) {
request.push_back(std::make_pair(std::string(uri), std::string(body)));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::pair<std::string, std::string>> request;
};
TEST(OAuth2AuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth({"a", "b", "c"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
}
TEST(OAuth2AuthProviderTest, Status200) {
TestAuthProvider auth({"a", "b", "c"});
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[0].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
ASSERT_EQ(2, auth.request.size());
EXPECT_EQ("https:
auth.request[1].first);
EXPECT_EQ(
"grant_type=refresh_token&client_id=a&client_secret=b&refresh_token=c",
auth.request[1].second);
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth2_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth2_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bdacba29-a14b-4563-ba5a-c2c9730812a0 | cpp | google/tensorstore | google_service_account_auth_provider | tensorstore/internal/oauth2/google_service_account_auth_provider.cc | tensorstore/internal/oauth2/google_service_account_auth_provider_test.cc | #include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_oauth2 {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr char kOAuthV4Url[] = "https:
constexpr char kOAuthScope[] = "https:
GoogleServiceAccountAuthProvider::GoogleServiceAccountAuthProvider(
const AccountCredentials& creds,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
creds_(creds),
uri_(kOAuthV4Url),
scope_(kOAuthScope),
transport_(std::move(transport)) {}
Result<HttpResponse> GoogleServiceAccountAuthProvider::IssueRequest(
std::string_view method, std::string_view uri, absl::Cord payload) {
return transport_
->IssueRequest(
HttpRequestBuilder(method, std::string{uri})
.AddHeader("Content-Type: application/x-www-form-urlencoded")
.BuildRequest(),
internal_http::IssueRequestOptions(std::move(payload)))
.result();
}
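// Implements the JWT-bearer flow: build the JWT header and claim set, sign
// them with the service account's RSA private key (RS256), then exchange the
// signed assertion for a bearer token at the OAuth token endpoint.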
Result<BearerTokenWithExpiration> GoogleServiceAccountAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto body,
internal_oauth2::BuildSignedJWTRequest(
creds_.private_key,
internal_oauth2::BuildJWTHeader(creds_.private_key_id),
internal_oauth2::BuildJWTClaimBody(creds_.client_email, scope_, uri_,
now, 3600 )));
TENSORSTORE_ASSIGN_OR_RETURN(
auto response, IssueRequest("POST", uri_, absl::Cord(std::move(body))));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
}
} | #include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider;
using ::tensorstore::internal_oauth2::GoogleServiceAccountCredentials;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
const GoogleServiceAccountCredentials kCreds{
"a1a111aa1111a11a11a11aa111a111a1a1111111",
GetFakePrivateKey(),
"https:
"foo-email@foo-project.iam.gserviceaccount.com",
};
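// Expected POST body: a JWT-bearer grant whose assertion is the base64url
// JWT produced from kCreds at the fixed test timestamp used below.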
constexpr char kBody[] =
"grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
"assertion="
"eyJhbGciOiJSUzI1NiIsImtpZCI6ImExYTExMWFhMTExMWExMWExMWExMWFhMTExYTExMWExYT"
"ExMTExMTEiLCJ0eXAiOiJKV1QifQ."
"eyJhdWQiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjQvdG9rZW4iLCJleH"
"AiOjE1NDc2Njk3MDMsImlhdCI6MTU0NzY2NjEwMywiaXNzIjoiZm9vLWVtYWlsQGZvby1wcm9q"
"ZWN0LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2NvcGUiOiJodHRwczovL3d3dy5nb29nbG"
"VhcGlzLmNvbS9hdXRoL2Nsb3VkLXBsYXRmb3JtIn0.gvM1sjnFXwQkBTTqobnTJqE8ZCrAR-"
"SEevEZB4Quqxd836v7iHjnWBiOkUCZl_o5wQouz5pFuhkQ1BlhhAZNih_Ko2yxBi0W_NuhI-"
"18We8gSMhi8pwfNu6WqNqXkHlQAJebhJQH23yP_A2dxU3Z50maUJaAl9G0e60CIynsaeW-"
"o7QneaPxPEWjOi--XMvkOu-z8eD0CXx1dUrlzINDxWzJFoXzCk2_NZ9-"
"UPzHWai68qKo2FjbtTT3fEPA-L1IN908OWhuN2UHdvPrg_"
"h13GO7kY3K7TsWotsgsLon2KxWYaDpasaY_ZqCIXCeS4jW89gVtsOB3E6B-xdR1Gq-9g";
class TestAuthProvider : public GoogleServiceAccountAuthProvider {
public:
TestAuthProvider(const GoogleServiceAccountCredentials& creds)
: GoogleServiceAccountAuthProvider(creds, nullptr,
[this] { return this->time; }),
time(absl::FromUnixSeconds(1547666103)),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord body) {
request.push_back(std::make_pair(std::string(uri), std::string(body)));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::pair<std::string, std::string>> request;
};
TEST(GoogleServiceAccountAuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c", "d"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GoogleServiceAccountAuthProviderTest, BadKeys) {
TestAuthProvider auth({"a", "b", "c", "d"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
EXPECT_EQ(0, auth.request.size());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth(kCreds);
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(kBody, auth.request[0].second);
}
TEST(GoogleServiceAccountAuthProviderTest, Status200) {
TestAuthProvider auth(kCreds);
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(1, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(2, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_service_account_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_service_account_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9a5566ba-75db-493f-aca1-ceb37b0190f6 | cpp | google/tensorstore | google_auth_provider | tensorstore/internal/oauth2/google_auth_provider.cc | tensorstore/internal/oauth2/google_auth_provider_test.cc | #include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <algorithm>
#include <fstream>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_oauth2 {
namespace {
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr char kOAuthV3Url[] = "https:
bool IsFile(const std::string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
Result<std::string> GetEnvironmentVariableFileName() {
auto env = GetEnv(kGoogleApplicationCredentials);
if (!env || !IsFile(*env)) {
return absl::NotFoundError(tensorstore::StrCat(
"$", kGoogleApplicationCredentials, " is not set or corrupt."));
}
return *env;
}
Result<std::string> GetWellKnownFileName() {
std::string result;
auto config_dir_override = GetEnv(kCloudSdkConfig);
if (config_dir_override) {
result = JoinPath(*config_dir_override, kWellKnownCredentialsFile);
} else {
auto home_dir = GetEnv("HOME");
if (!home_dir) {
return absl::NotFoundError("Could not read $HOME.");
}
result =
JoinPath(*home_dir, kGCloudConfigFolder, kWellKnownCredentialsFile);
}
if (!IsFile(result)) {
return absl::NotFoundError(
tensorstore::StrCat("Could not find the credentials file in the "
"standard gcloud location [",
result, "]"));
}
return result;
}
struct AuthProviderRegistry {
std::vector<std::pair<int, GoogleAuthProvider>> providers;
absl::Mutex mutex;
};
AuthProviderRegistry& GetGoogleAuthProviderRegistry() {
static absl::NoDestructor<AuthProviderRegistry> registry;
return *registry;
}
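// Default credential discovery, tried in order: (1) the
// GOOGLE_AUTH_TOKEN_FOR_TESTING environment variable, (2) a credentials file
// named by $GOOGLE_APPLICATION_CREDENTIALS, (3) the gcloud
// application-default credentials file, and (4) the GCE metadata server.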
Result<std::unique_ptr<AuthProvider>> GetDefaultGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
std::unique_ptr<AuthProvider> result;
auto var = GetEnv(kGoogleAuthTokenForTesting);
if (var) {
ABSL_LOG(INFO) << "Using GOOGLE_AUTH_TOKEN_FOR_TESTING";
result.reset(new FixedTokenAuthProvider(*std::move(var)));
return std::move(result);
}
absl::Status status;
auto credentials_filename = GetEnvironmentVariableFileName();
if (!credentials_filename) {
credentials_filename = GetWellKnownFileName();
}
if (credentials_filename.ok()) {
ABSL_LOG(INFO) << "Using credentials at " << *credentials_filename;
std::ifstream credentials_fstream(*credentials_filename);
auto json = ::nlohmann::json::parse(credentials_fstream, nullptr, false);
auto refresh_token = internal_oauth2::ParseRefreshToken(json);
if (refresh_token.ok()) {
ABSL_LOG(INFO) << "Using OAuth2 AuthProvider";
result.reset(new OAuth2AuthProvider(*refresh_token, kOAuthV3Url,
std::move(transport)));
return std::move(result);
}
auto service_account =
internal_oauth2::ParseGoogleServiceAccountCredentials(json);
if (service_account.ok()) {
ABSL_LOG(INFO) << "Using ServiceAccount AuthProvider";
result.reset(new GoogleServiceAccountAuthProvider(*service_account,
std::move(transport)));
return std::move(result);
}
status = absl::UnknownError(
tensorstore::StrCat("Unexpected content of the JSON credentials file: ",
*credentials_filename));
}
if (auto gce_service_account =
GceAuthProvider::GetDefaultServiceAccountInfoIfRunningOnGce(
transport.get());
gce_service_account.ok()) {
ABSL_LOG(INFO) << "Running on GCE, using service account "
<< gce_service_account->email;
result.reset(
new GceAuthProvider(std::move(transport), *gce_service_account));
return std::move(result);
}
if (!credentials_filename.ok()) {
ABSL_LOG(ERROR)
<< credentials_filename.status().message()
<< ". You may specify a credentials file using $"
<< kGoogleApplicationCredentials
<< ", or to use Google application default credentials, run: "
"gcloud auth application-default login";
}
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::NotFoundError(
"Could not locate the credentials file and not running on GCE.");
}
struct SharedGoogleAuthProviderState {
absl::Mutex mutex;
std::optional<Result<std::shared_ptr<AuthProvider>>> auth_provider
ABSL_GUARDED_BY(mutex);
};
SharedGoogleAuthProviderState& GetSharedGoogleAuthProviderState() {
static absl::NoDestructor<SharedGoogleAuthProviderState> state;
return *state;
}
}
void RegisterGoogleAuthProvider(GoogleAuthProvider provider, int priority) {
auto& registry = GetGoogleAuthProviderRegistry();
absl::WriterMutexLock lock(®istry.mutex);
registry.providers.emplace_back(priority, std::move(provider));
std::sort(registry.providers.begin(), registry.providers.end(),
[](const auto& a, const auto& b) { return a.first < b.first; });
}
Result<std::unique_ptr<AuthProvider>> GetGoogleAuthProvider(
std::shared_ptr<internal_http::HttpTransport> transport) {
{
auto& registry = GetGoogleAuthProviderRegistry();
absl::ReaderMutexLock lock(®istry.mutex);
for (const auto& provider : registry.providers) {
auto auth_result = provider.second();
if (auth_result.ok()) return auth_result;
}
}
return internal_oauth2::GetDefaultGoogleAuthProvider(std::move(transport));
}
Result<std::shared_ptr<AuthProvider>> GetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
if (!state.auth_provider) {
state.auth_provider.emplace(GetGoogleAuthProvider());
}
return *state.auth_provider;
}
void ResetSharedGoogleAuthProvider() {
auto& state = GetSharedGoogleAuthProviderState();
absl::MutexLock lock(&state.mutex);
state.auth_provider = std::nullopt;
}
}
} | #include "tensorstore/internal/oauth2/google_auth_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/oauth2/auth_provider.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "tensorstore/internal/oauth2/gce_auth_provider.h"
#include "tensorstore/internal/oauth2/google_auth_test_utils.h"
#include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth2_auth_provider.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
using ::tensorstore::internal_oauth2::AuthProvider;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GetGoogleAuthProvider;
using ::tensorstore::internal_oauth2::GoogleAuthTestScope;
class TestData
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteApplicationDefaultCredentials() {
auto p = JoinPath(path(), "application_default_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"client_id": "fake-client-id.apps.googleusercontent.com",
"client_secret": "fake-client-secret",
"refresh_token": "fake-refresh-token",
"type": "authorized_user"
})";
return p;
}
std::string WriteServiceAccountCredentials() {
auto p = JoinPath(path(), "service_account_credentials.json");
std::ofstream ofs(p);
ofs << R"({
"type": "service_account",
"project_id": "fake_project_id",
"private_key_id": "fake_key_id",
"client_email": "fake-test-project.iam.gserviceaccount.com",
"client_id": "fake_client_id",
"auth_uri": "https:
"token_uri": "https:
"auth_provider_x509_cert_url": "https:
"client_x509_cert_url": "https:
)";
ofs << " \"private_key\": \"" << absl::CEscape(GetFakePrivateKey())
<< "\" }";
return p;
}
};
class MetadataMockTransport : public HttpTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override {
ApplyResponseToHandler(
[&]() -> tensorstore::Result<HttpResponse> {
auto parsed = tensorstore::internal::ParseGenericUri(request.url);
if (!absl::StartsWith(parsed.authority_and_path,
"metadata.google.internal")) {
return absl::UnimplementedError("Mock cannot satisfy the request.");
}
constexpr char kOAuthPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/user@nowhere.com/token";
if (absl::StartsWith(parsed.authority_and_path, kOAuthPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "token_type" : "refresh", "access_token": "abc", "expires_in": 3600 })")};
}
constexpr char kServiceAccountPath[] =
"metadata.google.internal/computeMetadata/v1/"
"instance/service-accounts/default/";
if (absl::StartsWith(parsed.authority_and_path,
kServiceAccountPath)) {
if (!has_service_account_) {
return HttpResponse{404, absl::Cord()};
}
return HttpResponse{
200,
absl::Cord(
R"({ "email": "user@nowhere.com", "scopes": [ "test" ] })")};
}
return HttpResponse{200, absl::Cord()};
}(),
response_handler);
}
void set_has_service_account(bool has_service_account) {
has_service_account_ = has_service_account;
}
bool has_service_account_ = false;
};
class GoogleAuthProviderTest : public ::testing::Test {
public:
GoogleAuthTestScope google_auth_test_scope;
static void SetUpTestSuite() {
SetDefaultHttpTransport(mock_transport);
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
}
static void TearDownTestSuite() {
tensorstore::internal_oauth2::ResetSharedGoogleAuthProvider();
SetDefaultHttpTransport(nullptr);
}
static std::shared_ptr<MetadataMockTransport> mock_transport;
};
std::shared_ptr<MetadataMockTransport> GoogleAuthProviderTest::mock_transport =
std::make_shared<MetadataMockTransport>();
TEST_F(GoogleAuthProviderTest, Invalid) {
SetEnv("GCE_METADATA_ROOT", "invalidmetadata.google.internal");
auto auth_provider = GetGoogleAuthProvider();
EXPECT_FALSE(auth_provider.ok());
UnsetEnv("GCE_METADATA_ROOT");
}
TEST_F(GoogleAuthProviderTest, AuthTokenForTesting) {
SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::FixedTokenAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
std::unique_ptr<AuthProvider> auth = std::move(*auth_provider);
auto token = auth->GetToken();
ASSERT_TRUE(token.ok());
EXPECT_EQ("abc", token->token);
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentialsFromSDKConfig) {
TestData test_data;
test_data.WriteServiceAccountCredentials();
test_data.WriteApplicationDefaultCredentials();
SetEnv("CLOUDSDK_CONFIG", test_data.path().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleOAuth2AccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteApplicationDefaultCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::OAuth2AuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GoogleServiceAccountCredentials) {
TestData test_data;
SetEnv("GOOGLE_APPLICATION_CREDENTIALS",
test_data.WriteServiceAccountCredentials().c_str());
auto auth_provider = GetGoogleAuthProvider();
ASSERT_TRUE(auth_provider.ok()) << auth_provider.status();
{
auto instance = dynamic_cast<
tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider*>(
auth_provider->get());
EXPECT_FALSE(instance == nullptr);
}
}
TEST_F(GoogleAuthProviderTest, GceWithServiceAccount) {
mock_transport->set_has_service_account(true);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto auth_provider, GetGoogleAuthProvider());
{
auto instance =
dynamic_cast<tensorstore::internal_oauth2::GceAuthProvider*>(
auth_provider.get());
EXPECT_FALSE(instance == nullptr);
}
EXPECT_THAT(auth_provider->GetAuthHeader(),
::testing::Optional(std::string("Authorization: Bearer abc")));
}
TEST_F(GoogleAuthProviderTest, GceWithoutServiceAccount) {
mock_transport->set_has_service_account(false);
EXPECT_THAT(GetGoogleAuthProvider(),
tensorstore::MatchesStatus(absl::StatusCode::kNotFound));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c8f07261-9d3f-4de8-84ea-813e017e46e1 | cpp | google/tensorstore | fixed_token_auth_provider | tensorstore/internal/oauth2/fixed_token_auth_provider.cc | tensorstore/internal/oauth2/fixed_token_auth_provider_test.cc | #include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include "absl/time/time.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
FixedTokenAuthProvider::FixedTokenAuthProvider(std::string token)
: token_(token) {}
Result<BearerTokenWithExpiration> FixedTokenAuthProvider::GetToken() {
return BearerTokenWithExpiration{token_, absl::InfiniteFuture()};
}
}
} | #include "tensorstore/internal/oauth2/fixed_token_auth_provider.h"
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_oauth2::FixedTokenAuthProvider;
TEST(FixedTokenAuthProvider, Minimal) {
FixedTokenAuthProvider auth("token");
auto result = auth.GetToken();
EXPECT_TRUE(result.ok());
EXPECT_EQ("token", result->token);
EXPECT_LT(absl::Now(), result->expiration);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/fixed_token_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/fixed_token_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
98ae27e5-041b-4de0-ada7-423479d36c4d | cpp | google/tensorstore | oauth_utils | tensorstore/internal/oauth2/oauth_utils.cc | tensorstore/internal/oauth2/oauth_utils_test.cc | #include "tensorstore/internal/oauth2/oauth_utils.h"
#include <stddef.h>
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/time/time.h"
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace {
constexpr char kCryptoAlgorithm[] = "RS256";
constexpr char kJwtType[] = "JWT";
constexpr char kGrantType[] =
"urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer";
}
namespace internal_oauth2 {
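// Signs `to_sign` with the PEM-encoded RSA private key using SHA-256 (RS256)
// via OpenSSL's EVP interface, returning the signature as web-safe base64
// suitable for embedding in a JWT.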
Result<std::string> SignWithRSA256(std::string_view private_key,
std::string_view to_sign) {
if (private_key.empty()) {
return absl::InternalError("No private key provided.");
}
const auto md = EVP_sha256();
assert(md != nullptr);
auto md_ctx = std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)>(
EVP_MD_CTX_create(), &EVP_MD_CTX_free);
assert(md_ctx != nullptr);
auto pem_buffer = std::unique_ptr<BIO, decltype(&BIO_free)>(
BIO_new_mem_buf(static_cast<const char*>(private_key.data()),
static_cast<int>(private_key.length())),
&BIO_free);
if (!pem_buffer) {
return absl::InternalError("Could not create the PEM buffer.");
}
auto key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
PEM_read_bio_PrivateKey(
static_cast<BIO*>(pem_buffer.get()),
nullptr,
nullptr,
nullptr),
&EVP_PKEY_free);
if (!key) {
return absl::InternalError("Could not load the private key.");
}
if (EVP_DigestSignInit(md_ctx.get(), nullptr, md, nullptr, key.get()) != 1) {
return absl::InternalError("DigestInit failed.");
}
if (EVP_DigestSignUpdate(md_ctx.get(), to_sign.data(), to_sign.size()) != 1) {
return absl::InternalError("DigestUpdate failed.");
}
size_t sig_len = 0;
if (EVP_DigestSignFinal(md_ctx.get(), nullptr, &sig_len) != 1) {
return absl::InternalError("DigestFinal (get signature length) failed.");
}
std::unique_ptr<unsigned char[]> sig(new unsigned char[sig_len]);
if (EVP_DigestSignFinal(md_ctx.get(), sig.get(), &sig_len) != 1) {
return absl::InternalError("DigestFinal (signature compute) failed.");
}
std::string signature;
absl::WebSafeBase64Escape(
std::string_view(reinterpret_cast<char*>(sig.get()), sig_len),
&signature);
return std::move(signature);
}
std::string BuildJWTHeader(std::string_view key_id) {
::nlohmann::json assertion_header = {
{"alg", kCryptoAlgorithm},
{"typ", kJwtType},
{"kid", std::string(key_id)},
};
std::string encoded_header;
absl::WebSafeBase64Escape(assertion_header.dump(), &encoded_header);
return encoded_header;
}
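// Assembles the JWT claim set: issuer (the service account email), requested
// scope, audience (the token endpoint), issued-at, and an expiration
// `lifetime` seconds in the future; returned web-safe base64 encoded.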
std::string BuildJWTClaimBody(std::string_view client_email,
std::string_view scope,
std::string_view audience, absl::Time now,
std::int64_t lifetime) {
const std::int64_t request_timestamp_sec = absl::ToUnixSeconds(now);
const std::int64_t expiration_timestamp_sec =
request_timestamp_sec + lifetime;
::nlohmann::json assertion_payload = {
{"iss", std::string(client_email)}, {"scope", std::string(scope)},
{"aud", std::string(audience)}, {"iat", request_timestamp_sec},
{"exp", expiration_timestamp_sec},
};
std::string encoded_payload;
absl::WebSafeBase64Escape(assertion_payload.dump(), &encoded_payload);
return encoded_payload;
}
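// Concatenates header and claim body as "<header>.<body>", signs that string
// with RS256, and returns the x-www-form-urlencoded grant request of the
// form "grant_type=...&assertion=<header>.<body>.<signature>".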
Result<std::string> BuildSignedJWTRequest(std::string_view private_key,
std::string_view header,
std::string_view body) {
auto claim = tensorstore::StrCat(header, ".", body);
auto result = SignWithRSA256(private_key, claim);
if (!result) {
return result.status();
}
return tensorstore::StrCat("grant_type=", kGrantType, "&assertion=", claim,
".", *result);
}
constexpr static auto ErrorResponseBinder = jb::Object(
jb::Member("error",
jb::Projection(&ErrorResponse::error, jb::NonEmptyStringBinder)),
jb::Member("error_description",
jb::Projection(&ErrorResponse::error_description,
jb::NonEmptyStringBinder)),
jb::Member("error_uri", jb::Projection(&ErrorResponse::error_uri,
jb::NonEmptyStringBinder)),
jb::Member("error_subtype", jb::Projection(&ErrorResponse::error_subtype,
jb::NonEmptyStringBinder)),
jb::DiscardExtraMembers);
Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error) {
if (error.is_discarded()) {
return absl::InvalidArgumentError("Invalid ErrorResponse");
}
return jb::FromJson<ErrorResponse>(error, ErrorResponseBinder);
}
constexpr static auto GoogleServiceAccountCredentialsBinder = jb::Object(
jb::Member("private_key",
jb::Projection(&GoogleServiceAccountCredentials::private_key,
jb::NonEmptyStringBinder)),
jb::Member("private_key_id",
jb::Projection(&GoogleServiceAccountCredentials::private_key_id,
jb::NonEmptyStringBinder)),
jb::Member("client_email",
jb::Projection(&GoogleServiceAccountCredentials::client_email,
jb::NonEmptyStringBinder)),
jb::Member("token_uri",
jb::Projection(&GoogleServiceAccountCredentials::token_uri,
jb::DefaultInitializedValue())),
jb::DiscardExtraMembers);
Result<GoogleServiceAccountCredentials>
ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::InvalidArgumentError(
"Invalid GoogleServiceAccountCredentials token");
}
auto creds_token = jb::FromJson<GoogleServiceAccountCredentials>(
credentials, GoogleServiceAccountCredentialsBinder);
if (!creds_token.ok()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GoogleServiceAccountCredentials: ", creds_token.status()));
}
return creds_token;
}
Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials(
std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid GoogleServiceAccountCredentials: ", source));
}
return ParseGoogleServiceAccountCredentialsImpl(credentials);
}
constexpr static auto RefreshTokenBinder = jb::Object(
jb::Member("client_id", jb::Projection(&RefreshToken::client_id,
jb::NonEmptyStringBinder)),
jb::Member("client_secret", jb::Projection(&RefreshToken::client_secret,
jb::NonEmptyStringBinder)),
jb::Member("refresh_token", jb::Projection(&RefreshToken::refresh_token,
jb::NonEmptyStringBinder)),
jb::DiscardExtraMembers);
Result<RefreshToken> ParseRefreshTokenImpl(
const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::UnauthenticatedError("Invalid RefreshToken token");
}
auto refresh_token =
jb::FromJson<RefreshToken>(credentials, RefreshTokenBinder);
if (!refresh_token.ok()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid RefreshToken: ", credentials.dump()));
}
return refresh_token;
}
Result<RefreshToken> ParseRefreshToken(std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid RefreshToken: ", source));
}
return ParseRefreshTokenImpl(credentials);
}
constexpr static auto OAuthResponseBinder = jb::Object(
jb::Member("token_type", jb::Projection(&OAuthResponse::token_type,
jb::NonEmptyStringBinder)),
jb::Member("access_token", jb::Projection(&OAuthResponse::access_token,
jb::NonEmptyStringBinder)),
jb::Member("expires_in", jb::Projection(&OAuthResponse::expires_in,
jb::LooseInteger<int64_t>(1))),
jb::DiscardExtraMembers);
Result<OAuthResponse> ParseOAuthResponseImpl(
const ::nlohmann::json& credentials) {
if (credentials.is_discarded()) {
return absl::UnauthenticatedError("Invalid OAuthResponse token");
}
auto response_token =
jb::FromJson<OAuthResponse>(credentials, OAuthResponseBinder);
if (!response_token.ok()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid OAuthResponse: ", credentials.dump()));
}
return response_token;
}
Result<OAuthResponse> ParseOAuthResponse(std::string_view source) {
auto credentials = internal::ParseJson(source);
if (credentials.is_discarded()) {
return absl::UnauthenticatedError(
tensorstore::StrCat("Invalid OAuthResponse: ", source));
}
return ParseOAuthResponseImpl(credentials);
}
}
} | #include "tensorstore/internal/oauth2/oauth_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::ParseGoogleServiceAccountCredentials;
using ::tensorstore::internal_oauth2::ParseOAuthResponse;
using ::tensorstore::internal_oauth2::ParseRefreshToken;
std::string GetJsonKeyFileContents() {
constexpr char kJsonKeyfilePrefix[] = R"""({
"type": "service_account",
"project_id": "foo-project",
"private_key_id": "a1a111aa1111a11a11a11aa111a111a1a1111111",
"client_email": "foo-email@foo-project.iam.gserviceaccount.com",
"client_id": "100000000000000000001",
"auth_uri": "https:
"token_uri": "https:
"auth_provider_x509_cert_url": "https:
"client_x509_cert_url": "https:
)""";
return tensorstore::StrCat(kJsonKeyfilePrefix, " \"private_key\": \"",
absl::CEscape(GetFakePrivateKey()), "\" }");
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentials_Invalid) {
EXPECT_FALSE(ParseGoogleServiceAccountCredentials("{ }").ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "",
"private_key_id": "",
"client_email": "",
"token_uri": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "",
"private_key_id": "abc",
"client_email": "456"
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "",
"client_email": "456"
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456"
"token_uri": ""
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key_id": "abc",
"client_email": "456",
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"client_email": "456",
})")
.ok());
EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
})")
.ok());
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentials) {
auto result = ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456",
"token_uri": "wxy"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().private_key);
EXPECT_EQ("abc", result.value().private_key_id);
EXPECT_EQ("456", result.value().client_email);
EXPECT_EQ("wxy", result.value().token_uri);
result = ParseGoogleServiceAccountCredentials(R"({
"private_key" : "123",
"private_key_id": "abc",
"client_email": "456"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().private_key);
EXPECT_EQ("abc", result.value().private_key_id);
EXPECT_EQ("456", result.value().client_email);
EXPECT_EQ("", result.value().token_uri);
}
TEST(OAuthUtilTest, GoogleServiceAccountCredentialsFile) {
auto result = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("foo-email@foo-project.iam.gserviceaccount.com",
result->client_email);
}
TEST(OAuthUtilTest, ParseRefreshToken_Invalid) {
EXPECT_FALSE(ParseRefreshToken("{ }").ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "",
"client_secret": "",
"refresh_token": ""
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "",
"client_secret": "abc",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": ""
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": 456
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_secret": "abc",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"refresh_token": "456"
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
})")
.ok());
EXPECT_FALSE(ParseRefreshToken(R"json({
"error": "invalid_grant",
"error_description": "reauth related error (invalid_rapt)",
"error_uri": "https:
"error_subtype": "invalid_rapt"
})json")
.ok());
}
TEST(OAuthUtilTest, ParseRefreshToken) {
auto result = ParseRefreshToken(R"({
"client_id" : "123",
"client_secret": "abc",
"refresh_token": "456"
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().client_id);
EXPECT_EQ("abc", result.value().client_secret);
EXPECT_EQ("456", result.value().refresh_token);
}
TEST(OAuthUtilTest, ParseOAuthResponse_Invalid) {
EXPECT_FALSE(ParseOAuthResponse("{ }").ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "",
"access_token": "abc",
"expires_in": 456
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "123",
"access_token": "",
"expires_in": 456
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"token_type" : "123",
"access_token": "abc",
})json")
.ok());
EXPECT_FALSE(ParseOAuthResponse(R"json({
"error": "invalid_grant",
"error_description": "reauth related error (invalid_rapt)",
"error_uri": "https:
"error_subtype": "invalid_rapt"
})json")
.ok());
}
TEST(OAuthUtilTest, ParseOAuthResponse) {
EXPECT_TRUE(ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": "456"
})")
.ok());
auto result = ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})");
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("123", result.value().token_type);
EXPECT_EQ("abc", result.value().access_token);
EXPECT_EQ(456, result.value().expires_in);
result = ParseOAuthResponse(R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456,
"extra_fields": "are ignored"
})");
ASSERT_TRUE(result.ok()) << result.status();
}
TEST(OAuthUtilTest, BuildJWTClaimTest) {
using ::tensorstore::internal_oauth2::BuildJWTClaimBody;
using ::tensorstore::internal_oauth2::BuildJWTHeader;
EXPECT_EQ("eyJhbGciOiJSUzI1NiIsImtpZCI6ImEiLCJ0eXAiOiJKV1QifQ",
BuildJWTHeader("a"));
EXPECT_EQ(
"eyJhdWQiOiI0IiwiZXhwIjoxNTQ3NjY5NzAzLCJpYXQiOjE1NDc2NjYxMDMsImlzcyI6ImIi"
"LCJzY29wZSI6ImMifQ",
BuildJWTClaimBody("b", "c", "4", absl::FromUnixSeconds(1547666103),
3600));
}
TEST(OAuthUtilTest, Sign) {
using ::tensorstore::internal_oauth2::SignWithRSA256;
{
auto result = SignWithRSA256("", "something");
EXPECT_FALSE(result.ok());
}
{
constexpr char kBadKey[] =
"-----BEGIN PRIVATE KEY-----\n"
"Z23x2ZUyar6i0BQ8eJFAEN+IiUapEeCVazuxJSt4RjYfwSa/"
"p117jdZGEWD0GxMC\nlUtj+/nH3HDQjM4ltYfTPUg=\n"
"-----END PRIVATE KEY-----\n";
auto result = SignWithRSA256(kBadKey, "something");
EXPECT_FALSE(result.ok());
}
auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(creds.ok());
{
auto result = SignWithRSA256(creds->private_key, "something");
ASSERT_TRUE(result.ok());
EXPECT_EQ(
"A-sH4BVqtxu-6LECWJCb0VKGDj46pnpBpZB1KViuhG2CwugRVR6V3-"
"w8eBvAUbIRewSnXp_lWkxdy_rZBMau9VuILnLOC0t692-"
"L8WEqHsoFYBWvTZGCT5XkslVXhxt4d8jgM6U_8If4Cf3fGA4XAxpP-pyrbPGz-"
"VXn6R7jcLGOLsFtcuAXpJ9zkwYE72pGUtI_hiU-"
"tquIEayOQW9frXJlxt2oR4ld1l3p0FWibkNY8OfYPdTlRS0WcsgpWngTamHEBplJ5xNLD5"
"Ye5bG1DFqBJn0evxW0btbcfKCYuyirvgvHPsTt-"
"YMcPGo1xtlhT5c4ycEHOObFUGDpKPjljw",
*result);
}
}
TEST(OAuthUtilTest, BuildJWTRequestBody) {
using ::tensorstore::internal_oauth2::BuildSignedJWTRequest;
auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
ASSERT_TRUE(creds.ok());
auto result =
BuildSignedJWTRequest(creds->private_key, "header", "something");
ASSERT_TRUE(result.ok());
EXPECT_EQ(
"grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
"assertion=header.something.LyvY9ZVG6tL34g5Wji--3G5JGQP-"
"fza47yBQIrRHJqecVUTVGuEXti_deBjSbB36gvpBOE67-U9h1wgD2VR_"
"MDx8JaQHGct04gVZdKC7m4uqu5lI8u0jqXGG4UbRwfUMZ0UCjxJfyUbg6KUR7iyiqoH5szZv"
"31rJISnM4RQvH-lQFrE6BuXpvB09Hve4T3q5mtq7E9pd5rXz_"
"vlqL5ib5tkdBEg2cbydDZHeCx5uA9qcg3hGidrU1fLgreFKu3dSvzu4qFZL3-"
"0Pnt4XMqwslx2vBbFQB7_K8Dnz10F1TA5njOvwFRWNjKM1I0cRZ5N3O1CnGv1wyAz-"
"FIcKdk5_7Q",
*result);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth_utils.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/oauth_utils_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a967d760-b018-42d6-a3dc-cae0cbac31b8 | cpp | google/tensorstore | concurrent | tensorstore/internal/testing/concurrent.cc | tensorstore/internal/testing/concurrent_test.cc | #include "tensorstore/internal/testing/concurrent.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
namespace tensorstore {
namespace internal_testing {
#ifdef _WIN32
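// CreateMutexA with a fixed name yields a machine-wide mutex, so this lock
// serializes TestConcurrent sections across processes as well as threads.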
TestConcurrentLock::TestConcurrentLock() {
  handle_ = ::CreateMutexA(/*lpMutexAttributes=*/nullptr,
                           /*bInitialOwner=*/FALSE,
                           /*lpName=*/"TensorStoreTestConcurrentMutex");
ABSL_CHECK(handle_ != nullptr);
  if (::WaitForSingleObject(handle_, /*dwMilliseconds=*/0) != WAIT_OBJECT_0) {
ABSL_LOG(INFO) << "Waiting on WIN32 Concurrent Lock";
ABSL_CHECK(::WaitForSingleObject(handle_, INFINITE) == WAIT_OBJECT_0);
}
}
TestConcurrentLock::~TestConcurrentLock() {
ABSL_CHECK(::ReleaseMutex(handle_));
::CloseHandle(handle_);
}
#endif
}
} | #include "tensorstore/internal/testing/concurrent.h"
#include <atomic>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
namespace {
using ::tensorstore::internal_testing::TestConcurrent;
TEST(TestConcurrent, EnsureContentionHappens) {
static constexpr int kIterations = 100;
static constexpr int kN = 20;
absl::Mutex lock;
int uncontended{0};
TestConcurrent<kN>(
kIterations,
      /*initialize=*/[&] {},
      /*finalize=*/[&] {},
[&](auto) {
if (lock.TryLock()) {
uncontended++;
lock.Unlock();
}
});
int contended = (kIterations * kN) - uncontended;
ABSL_LOG(INFO) << "Contended in " << contended << " of 2000 iterations.";
}
TEST(TestConcurrent, Example1) {
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent(
kIterations,
      /*initialize=*/[&] {},
      /*finalize=*/[&] {},
[&]() { sum += 1; }, [&]() { sum += 2; }, [&]() { sum += 3; });
EXPECT_EQ(100 + 200 + 300, sum);
}
template <typename T>
struct TestConcurrentFixture : public ::testing::Test {};
using ConcurrentOpSizes = ::testing::Types<std::integral_constant<int, 1>,
std::integral_constant<int, 4>,
std::integral_constant<int, 16>>;
TYPED_TEST_SUITE(TestConcurrentFixture, ConcurrentOpSizes);
TYPED_TEST(TestConcurrentFixture, Example2) {
static constexpr int kN = TypeParam{}();
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent<kN>(
kIterations,
      /*initialize=*/[&] {},
      /*finalize=*/[&] {}, [&](auto i) { sum += (i + 1); });
EXPECT_EQ((kIterations / 2) * kN * (kN + 1), sum);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/testing/concurrent.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/testing/concurrent_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b8ea089a-3733-4567-b660-481e6bcb3155 | cpp | google/tensorstore | intrusive_red_black_tree | tensorstore/internal/container/intrusive_red_black_tree.cc | tensorstore/internal/container/intrusive_red_black_tree_test.cc | #include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <utility>
namespace tensorstore {
namespace internal {
namespace intrusive_red_black_tree {
namespace ops {
inline void SetParent(NodeData* node, NodeData* parent) {
node->rbtree_parent_ = {parent, node->rbtree_parent_.tag()};
}
inline void SetColor(NodeData* node, Color color) {
node->rbtree_parent_.set_tag(color);
}
inline Direction ChildDir(NodeData* node) {
return static_cast<Direction>(node != ops::Child(ops::Parent(node), kLeft));
}
inline NodeData* Grandparent(NodeData* node) {
return ops::Parent(ops::Parent(node));
}
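// Rotates the subtree rooted at `x` in direction `dir`: x's child on the
// opposite side becomes the new subtree root, and `x` becomes that node's
// `dir`-side child. Updates `root` if `x` was the tree root.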
void Rotate(NodeData*& root, NodeData* x, Direction dir) {
auto* y = ops::Child(x, !dir);
ops::Child(x, !dir) = ops::Child(y, dir);
if (ops::Child(y, dir)) {
ops::SetParent(ops::Child(y, dir), x);
}
ops::SetParent(y, ops::Parent(x));
if (!ops::Parent(x)) {
root = y;
} else {
ops::Child(ops::Parent(x), ops::ChildDir(x)) = y;
}
ops::Child(y, dir) = x;
ops::SetParent(x, y);
}
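// Restores the red-black invariants after inserting the red node `z`.
// Returns `true` if the black height of the tree increased (i.e. the root
// had to be recolored from red to black).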
bool InsertFixup(NodeData*& root, NodeData* z) {
assert(ops::IsRed(z));
while (ops::IsRed(ops::Parent(z))) {
Direction dir = ops::ChildDir(ops::Parent(z));
if (NodeData* y = ops::Child(ops::Grandparent(z), !dir); ops::IsRed(y)) {
ops::SetColor(ops::Parent(z), kBlack);
ops::SetColor(y, kBlack);
ops::SetColor(ops::Grandparent(z), kRed);
z = ops::Grandparent(z);
} else {
if (ops::ChildDir(z) == !dir) {
z = ops::Parent(z);
ops::Rotate(root, z, dir);
}
ops::SetColor(ops::Parent(z), kBlack);
ops::SetColor(ops::Grandparent(z), kRed);
ops::Rotate(root, ops::Grandparent(z), !dir);
assert(!ops::IsRed(ops::Parent(z)));
break;
}
}
const Color existing_color = ops::GetColor(root);
ops::SetColor(root, kBlack);
return existing_color == kRed;
}
struct TreeWithBlackHeight {
NodeData* root = nullptr;
size_t black_height = 0;
};
size_t BlackHeight(NodeData* node) {
size_t black_height = 0;
while (node) {
if (ops::GetColor(node) == kBlack) ++black_height;
node = ops::Child(node, kLeft);
}
return black_height;
}
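// Joins `a_tree` (which becomes the `a_dir` side of `center`) with `b_tree`
// (the opposite side): descends the taller tree to a black node whose black
// height matches the shorter tree, grafts `center` there as a red node, and
// restores the invariants with `InsertFixup`.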
TreeWithBlackHeight Join(TreeWithBlackHeight a_tree, NodeData* center,
TreeWithBlackHeight b_tree, Direction a_dir) {
assert(a_tree.black_height == ops::BlackHeight(a_tree.root));
assert(b_tree.black_height == ops::BlackHeight(b_tree.root));
if (a_tree.black_height < b_tree.black_height) {
a_dir = !a_dir;
std::swap(a_tree, b_tree);
}
size_t difference = a_tree.black_height - b_tree.black_height;
NodeData* a_graft = a_tree.root;
NodeData* a_graft_parent = nullptr;
while (true) {
if (!ops::IsRed(a_graft)) {
if (difference == 0) break;
--difference;
}
a_graft_parent = a_graft;
a_graft = ops::Child(a_graft, !a_dir);
}
assert(!ops::IsRed(a_graft));
ops::SetColor(center, kRed);
ops::SetParent(center, a_graft_parent);
if (a_graft_parent) {
ops::Child(a_graft_parent, !a_dir) = center;
} else {
a_tree.root = center;
}
ops::Child(center, a_dir) = a_graft;
if (a_graft) {
ops::SetParent(a_graft, center);
}
ops::Child(center, !a_dir) = b_tree.root;
if (b_tree.root) {
ops::SetParent(b_tree.root, center);
}
a_tree.black_height += ops::InsertFixup(a_tree.root, center);
return a_tree;
}
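// Detaches `child` (with black height `black_height`) into a standalone
// tree, recoloring a red root to black and incrementing the recorded black
// height accordingly.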
TreeWithBlackHeight ExtractSubtreeWithBlackHeight(NodeData* child,
size_t black_height) {
TreeWithBlackHeight tree{child, black_height};
if (child) {
ops::SetParent(child, nullptr);
if (ops::GetColor(child) == kRed) {
++tree.black_height;
ops::SetColor(child, kBlack);
}
}
return tree;
}
NodeData* ExtremeNode(NodeData* x, Direction dir) {
assert(x);
while (auto* child = ops::Child(x, dir)) x = child;
return x;
}
NodeData* TreeExtremeNode(NodeData* root, Direction dir) {
if (!root) return nullptr;
return ops::ExtremeNode(root, dir);
}
NodeData* Traverse(NodeData* x, Direction dir) {
if (auto* child = ops::Child(x, dir)) {
return ops::ExtremeNode(child, !dir);
}
auto* y = ops::Parent(x);
while (y && x == ops::Child(y, dir)) {
x = y;
y = ops::Parent(y);
}
return y;
}
void Insert(NodeData*& root, NodeData* parent, Direction direction,
NodeData* new_node) {
if (!parent) {
assert(!root);
root = new_node;
} else {
if (ops::Child(parent, direction)) {
parent = ops::Traverse(parent, direction);
direction = !direction;
}
ops::Child(parent, direction) = new_node;
}
ops::SetParent(new_node, parent);
ops::Child(new_node, kLeft) = nullptr;
ops::Child(new_node, kRight) = nullptr;
ops::SetColor(new_node, kRed);
ops::InsertFixup(root, new_node);
}
NodeData* Join(NodeData* a_tree, NodeData* center, NodeData* b_tree,
Direction a_dir) {
return ops::Join({a_tree, ops::BlackHeight(a_tree)}, center,
{b_tree, ops::BlackHeight(b_tree)}, a_dir)
.root;
}
NodeData* Join(NodeData* a_tree, NodeData* b_tree, Direction a_dir) {
if (!a_tree) return b_tree;
if (!b_tree) return a_tree;
auto* center = ops::ExtremeNode(a_tree, !a_dir);
ops::Remove(a_tree, center);
return ops::Join(a_tree, center, b_tree, a_dir);
}
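// Splits the tree rooted at `root` around `center`: walking from `center` up
// to the root, each ancestor and its other subtree are joined onto the
// appropriate side, yielding the trees of nodes ordered before and after
// `center`.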
std::array<NodeData*, 2> Split(NodeData* root, NodeData* center) {
std::array<TreeWithBlackHeight, 2> split_trees;
size_t center_black_height = ops::BlackHeight(center);
size_t child_black_height =
center_black_height - (ops::GetColor(center) == kBlack);
for (int dir = 0; dir < 2; ++dir) {
split_trees[dir] = ops::ExtractSubtreeWithBlackHeight(
ops::Child(center, static_cast<Direction>(dir)), child_black_height);
}
NodeData* parent = ops::Parent(center);
while (parent) {
Direction dir =
static_cast<Direction>(ops::Child(parent, kRight) == center);
NodeData* grandparent = ops::Parent(parent);
auto parent_color = ops::GetColor(parent);
split_trees[!dir] =
ops::Join(split_trees[!dir], parent,
ops::ExtractSubtreeWithBlackHeight(ops::Child(parent, !dir),
center_black_height),
dir);
center = parent;
parent = grandparent;
center_black_height += (parent_color == kBlack);
}
assert(center == root);
return {{split_trees[0].root, split_trees[1].root}};
}
std::array<NodeData*, 2> Split(NodeData* root, NodeData*& center, Direction dir,
bool found) {
if (!center) return {{nullptr, nullptr}};
auto split_trees = ops::Split(root, center);
if (!found) {
ops::InsertExtreme(split_trees[!dir], dir, center);
center = nullptr;
}
return split_trees;
}
void InsertExtreme(NodeData*& root, Direction dir, NodeData* new_node) {
ops::Insert(root, ops::TreeExtremeNode(root, dir), dir, new_node);
}
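// Unlinks `z` from the tree using the standard red-black deletion algorithm;
// if a black node was removed, runs the rebalancing loop to restore the
// black-height invariant.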
void Remove(NodeData*& root, NodeData* z) {
NodeData* y;
if (!ops::Child(z, kLeft) || !ops::Child(z, kRight)) {
y = z;
} else {
y = ops::Traverse(z, kRight);
}
NodeData* x =
ops::Child(y, static_cast<Direction>(ops::Child(y, kLeft) == nullptr));
NodeData* px = ops::Parent(y);
if (x) {
ops::SetParent(x, px);
}
if (!px) {
root = x;
} else {
ops::Child(px, ops::ChildDir(y)) = x;
}
const Color color_removed = ops::GetColor(y);
if (y != z) {
if (px == z) px = y;
Replace(root, z, y);
} else {
z->rbtree_parent_ = ops::DisconnectedParentValue();
}
if (color_removed == kRed) {
return;
}
while (px && !ops::IsRed(x)) {
const Direction dir = static_cast<Direction>(x == ops::Child(px, kRight));
NodeData* w = ops::Child(px, !dir);
assert(w != nullptr);
if (ops::GetColor(w) == kRed) {
ops::SetColor(w, kBlack);
ops::SetColor(px, kRed);
ops::Rotate(root, px, dir);
w = ops::Child(px, !dir);
}
assert(ops::GetColor(w) == kBlack);
if (!ops::IsRed(ops::Child(w, kLeft)) &&
!ops::IsRed(ops::Child(w, kRight))) {
ops::SetColor(w, kRed);
x = px;
px = ops::Parent(x);
} else {
if (!ops::IsRed(ops::Child(w, !dir))) {
ops::SetColor(ops::Child(w, dir), kBlack);
ops::SetColor(w, kRed);
ops::Rotate(root, w, !dir);
w = ops::Child(px, !dir);
}
ops::SetColor(w, ops::GetColor(px));
ops::SetColor(px, kBlack);
ops::SetColor(ops::Child(w, !dir), kBlack);
ops::Rotate(root, px, dir);
x = root;
px = nullptr;
}
}
if (x) ops::SetColor(x, kBlack);
}
void Replace(NodeData*& root, NodeData* existing, NodeData* replacement) {
*replacement = *existing;
for (int dir = 0; dir < 2; ++dir) {
if (ops::Child(replacement, static_cast<Direction>(dir))) {
ops::SetParent(ops::Child(replacement, static_cast<Direction>(dir)),
replacement);
}
}
if (!ops::Parent(existing)) {
root = replacement;
} else {
ops::Child(ops::Parent(existing), ops::ChildDir(existing)) = replacement;
}
existing->rbtree_parent_ = ops::DisconnectedParentValue();
}
}
}
}
} | #include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
namespace {
namespace rbtree = tensorstore::internal::intrusive_red_black_tree;
namespace ops = tensorstore::internal::intrusive_red_black_tree::ops;
int CheckInvariants(ops::NodeData* x) {
if (!x) return 1;
ops::NodeData* c1 = ops::Child(x, rbtree::kLeft);
ops::NodeData* c2 = ops::Child(x, rbtree::kRight);
if (c1) {
EXPECT_EQ(x, ops::Parent(c1));
}
if (c2) {
EXPECT_EQ(x, ops::Parent(c2));
}
if (ops::GetColor(x) == rbtree::kRed) {
EXPECT_FALSE(ops::IsRed(c1));
EXPECT_FALSE(ops::IsRed(c2));
}
int lh = CheckInvariants(c1);
int rh = CheckInvariants(c2);
EXPECT_EQ(lh, rh);
if (ops::GetColor(x) == rbtree::kRed) {
return lh;
} else {
return lh + 1;
}
}
template <typename Node, typename Tag, typename Compare>
void CheckInvariants(rbtree::Tree<Node, Tag>& x, Compare compare) {
auto* root = static_cast<rbtree::NodeBase<Tag>*>(x.root());
if (!root) return;
EXPECT_EQ(rbtree::kBlack, ops::GetColor(root));
CheckInvariants(root);
EXPECT_TRUE(std::is_sorted(
x.begin(), x.end(), [&](Node& a, Node& b) { return compare(a, b) < 0; }));
}
struct Set {
struct Node : public rbtree::NodeBase<> {
int value;
};
static void FormatNode(std::string& out, const std::string& prefix,
Node* node, bool dir) {
out += prefix;
out += (dir == rbtree::kLeft) ? "|- " : " - ";
if (!node) {
out += "null";
} else {
out += std::to_string(node->value);
out += ops::GetColor(node) == rbtree::kBlack ? "(blk)" : "(red)";
}
out += '\n';
if (!node) return;
std::string child_prefix =
prefix + ((dir == rbtree::kLeft) ? "| " : " ");
for (int dir = 0; dir < 2; ++dir) {
FormatNode(out, child_prefix,
static_cast<Node*>(
ops::Child(node, static_cast<rbtree::Direction>(dir))),
static_cast<rbtree::Direction>(dir));
}
}
static std::string FormatTree(rbtree::Tree<Node>& tree) {
std::string out;
FormatNode(out, "", tree.root(), rbtree::kRight);
return out;
}
static auto CompareToKey(int key) {
return [key](Node& node) -> absl::weak_ordering {
return tensorstore::internal::DoThreeWayComparison(std::less<int>{}, key,
node.value);
};
}
static auto CompareNodes() {
return [](Node& a, Node& b) -> absl::weak_ordering {
return tensorstore::internal::CompareResultAsWeakOrdering(a.value -
b.value);
};
}
static std::vector<int> Elements(rbtree::Tree<Node>& tree) {
std::vector<int> elements;
for (auto& node : tree) {
elements.push_back(node.value);
}
return elements;
}
using Tree = rbtree::Tree<Node>;
Tree tree;
std::set<int> golden_set;
void CheckTreeInvariants() {
SCOPED_TRACE("\n" + FormatTree(tree));
CheckInvariants(tree, CompareNodes());
}
bool Contains(int key) {
bool result = tree.Find(CompareToKey(key)).found;
EXPECT_EQ(result, golden_set.count(key) == 1);
return result;
}
Node* FindNode(int key) {
auto* node = tree.Find(CompareToKey(key)).found_node();
assert(node);
return node;
}
bool Insert(int key) {
auto [node, inserted] = tree.FindOrInsert(CompareToKey(key), [&] {
auto* n = new Node;
n->value = key;
return n;
});
EXPECT_EQ(key, node->value);
CheckTreeInvariants();
EXPECT_EQ(inserted, golden_set.insert(key).second);
return inserted;
}
bool Erase(int key) {
auto node = tree.Find(CompareToKey(key)).found_node();
bool result;
if (!node) {
result = false;
} else {
tree.Remove(*node);
delete node;
CheckTreeInvariants();
result = true;
}
EXPECT_EQ(static_cast<int>(result), golden_set.erase(key));
return result;
}
void CheckElements() {
EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
golden_set.end()));
}
void CheckSplitJoin(int key) {
auto orig_elements = Elements();
auto split_result = tree.FindSplit([&](Node& node) -> absl::weak_ordering {
return tensorstore::internal::DoThreeWayComparison(std::less<>{}, key,
node.value);
});
SCOPED_TRACE("Key=" + std::to_string(key) +
"\nLeft tree:\n" + FormatTree(split_result.trees[0]) +
"\nRight tree:\n" + FormatTree(split_result.trees[1]));
for (int i = 0; i < 2; ++i) {
CheckInvariants(split_result.trees[i], CompareNodes());
}
std::vector<int> elements_a = Elements(split_result.trees[0]);
std::vector<int> elements_b = Elements(split_result.trees[1]);
std::vector<int> combined_elements = elements_a;
if (split_result.center) {
EXPECT_EQ(key, split_result.center->value);
combined_elements.push_back(split_result.center->value);
}
combined_elements.insert(combined_elements.end(), elements_b.begin(),
elements_b.end());
EXPECT_THAT(combined_elements, ::testing::ElementsAreArray(orig_elements));
if (split_result.center) {
tree = Tree::Join(split_result.trees[0], *split_result.center,
split_result.trees[1]);
} else {
tree = Tree::Join(split_result.trees[0], split_result.trees[1]);
}
CheckTreeInvariants();
CheckElements();
}
void CheckSplitJoin() {
auto orig_elements = Elements();
if (orig_elements.empty()) {
CheckSplitJoin(0);
} else {
int min = orig_elements.front() - 1;
int max = orig_elements.back() + 1;
for (int x = min; x <= max; ++x) {
SCOPED_TRACE(x);
CheckSplitJoin(x);
}
}
}
std::vector<int> Elements() { return Elements(tree); }
~Set() {
for (auto it = tree.begin(); it != tree.end();) {
auto next = std::next(it);
tree.Remove(*it);
delete &*it;
it = next;
}
}
};
TEST(SetTest, SimpleInsert1) {
Set rbtree_set;
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(1);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(2);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(3);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
}
TEST(SetTest, SimpleInsert2) {
Set rbtree_set;
Set::Tree::Range empty_range = rbtree_set.tree;
EXPECT_TRUE(empty_range.empty());
EXPECT_EQ(empty_range, empty_range);
rbtree_set.Insert(5);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(8);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(1);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(3);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(9);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(7);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
rbtree_set.Insert(0);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
Set::Tree::Range full_range = rbtree_set.tree;
EXPECT_FALSE(full_range.empty());
EXPECT_EQ(full_range, full_range);
EXPECT_NE(full_range, empty_range);
EXPECT_EQ(full_range.begin(), rbtree_set.tree.begin());
EXPECT_EQ(full_range.end(), rbtree_set.tree.end());
Set::Tree::Range partial_range(rbtree_set.FindNode(1),
rbtree_set.FindNode(5));
EXPECT_NE(partial_range, full_range);
EXPECT_NE(partial_range, empty_range);
std::set<int> partial_elements;
for (auto& node : partial_range) {
partial_elements.insert(node.value);
}
EXPECT_THAT(partial_elements, ::testing::ElementsAre(1, 3));
}
TEST(SetTest, RandomInsert) {
Set rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
for (int i = 0; i < 20; ++i) {
const int key = absl::Uniform(gen, 0, kMaxKey);
rbtree_set.Contains(key);
rbtree_set.Insert(key);
rbtree_set.CheckElements();
rbtree_set.CheckSplitJoin();
}
}
TEST(SetTest, RandomInsertRemove) {
Set rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
for (int i = 0; i < 50; ++i) {
const int key = absl::Uniform(gen, 0, kMaxKey);
if (absl::Bernoulli(gen, 0.5)) {
rbtree_set.Insert(key);
} else {
rbtree_set.Erase(key);
}
}
}
struct MultiSet {
using Pair = std::pair<int, int>;
struct Node : public rbtree::NodeBase<> {
Pair value;
};
struct Compare {
bool operator()(const Pair& a, const Pair& b) const {
return a.first < b.first;
}
};
using Tree = rbtree::Tree<Node>;
Tree tree;
std::multiset<Pair, Compare> golden_set;
constexpr static auto ThreeWayCompare = [](Node& a, Node& b) {
return tensorstore::internal::CompareResultAsWeakOrdering(a.value.first -
b.value.first);
};
void CheckTreeInvariants() { CheckInvariants(tree, ThreeWayCompare); }
void Insert(Pair value) {
tree.FindOrInsert(
[&](Node& node) {
return value.first < node.value.first ? absl::weak_ordering::less
: absl::weak_ordering::greater;
},
[&] {
auto* n = new Node;
n->value = value;
return n;
});
CheckTreeInvariants();
golden_set.insert(value);
}
void CheckElements() {
EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
golden_set.end()));
}
std::vector<Pair> Elements() {
std::vector<Pair> elements;
for (auto& node : tree) {
elements.push_back(node.value);
}
return elements;
}
~MultiSet() {
for (auto it = tree.begin(); it != tree.end();) {
auto next = std::next(it);
tree.Remove(*it);
delete &*it;
it = next;
}
}
};
TEST(MultiSetTest, SimpleInsert1) {
MultiSet rbtree_set;
rbtree_set.Insert({1, 2});
rbtree_set.CheckElements();
rbtree_set.Insert({2, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({1, 1});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 1});
rbtree_set.CheckElements();
EXPECT_THAT(
rbtree_set.Elements(),
::testing::ElementsAre(::testing::Pair(1, 2), ::testing::Pair(1, 1),
::testing::Pair(2, 0), ::testing::Pair(3, 0),
::testing::Pair(3, 1)));
}
TEST(MultiSetTest, SimpleInsert2) {
MultiSet rbtree_set;
rbtree_set.Insert({5, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({8, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({1, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({3, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({9, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({7, 0});
rbtree_set.CheckElements();
rbtree_set.Insert({0, 0});
rbtree_set.CheckElements();
}
TEST(MultiSetTest, RandomInsert) {
MultiSet rbtree_set;
absl::BitGen gen;
constexpr int kMaxKey = 10;
constexpr int kMaxValue = 100;
for (int i = 0; i < 20; ++i) {
rbtree_set.Insert(
{absl::Uniform(gen, 0, kMaxKey), absl::Uniform(gen, 0, kMaxValue)});
rbtree_set.CheckElements();
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_red_black_tree.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_red_black_tree_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ccb60d2c-936c-4373-8466-533bea7ac7ff | cpp | google/tensorstore | same | tensorstore/internal/json/same.cc | tensorstore/internal/json/same_test.cc | #include "tensorstore/internal/json/same.h"
#include <variant>
#include "absl/container/inlined_vector.h"
#include <nlohmann/json.hpp>
namespace tensorstore {
namespace internal_json {
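// Equality comparison that, unlike `operator==`, treats `discarded` values as
// equal to each other, and uses an explicit stack instead of recursion so
// that deeply nested values do not overflow the call stack.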
bool JsonSame(const ::nlohmann::json& a, const ::nlohmann::json& b) {
using value_t = ::nlohmann::json::value_t;
using array_t = ::nlohmann::json::array_t;
using object_t = ::nlohmann::json::object_t;
struct ArrayIterators {
array_t::const_iterator a_cur, a_end, b_cur;
};
struct ObjectIterators {
object_t::const_iterator a_cur, a_end, b_cur;
};
using StackEntry = std::variant<ArrayIterators, ObjectIterators>;
absl::InlinedVector<StackEntry, 64> stack;
const auto compare_or_defer_values = [&](const ::nlohmann::json& a_value,
const ::nlohmann::json& b_value) {
const auto t = a_value.type();
switch (t) {
case value_t::discarded:
case value_t::null:
return b_value.type() == t;
case value_t::array: {
if (b_value.type() != t) return false;
const auto& a_arr = a_value.get_ref<const array_t&>();
const auto& b_arr = b_value.get_ref<const array_t&>();
if (a_arr.size() != b_arr.size()) return false;
if (a_arr.empty()) return true;
stack.emplace_back(
ArrayIterators{a_arr.begin(), a_arr.end(), b_arr.begin()});
return true;
}
case value_t::object: {
if (b_value.type() != t) return false;
const auto& a_obj = a_value.get_ref<const object_t&>();
const auto& b_obj = b_value.get_ref<const object_t&>();
if (a_obj.size() != b_obj.size()) return false;
if (a_obj.empty()) return true;
stack.emplace_back(
ObjectIterators{a_obj.begin(), a_obj.end(), b_obj.begin()});
return true;
}
default:
return a_value == b_value;
}
};
if (!compare_or_defer_values(a, b)) return false;
while (!stack.empty()) {
auto& e = stack.back();
if (auto* array_iterators = std::get_if<ArrayIterators>(&e)) {
auto& a_v = *array_iterators->a_cur;
auto& b_v = *array_iterators->b_cur;
if (++array_iterators->a_cur == array_iterators->a_end) {
stack.pop_back();
} else {
++array_iterators->b_cur;
}
if (!compare_or_defer_values(a_v, b_v)) {
return false;
}
} else {
auto* object_iterators = std::get_if<ObjectIterators>(&e);
auto& a_kv = *object_iterators->a_cur;
auto& b_kv = *object_iterators->b_cur;
if (++object_iterators->a_cur == object_iterators->a_end) {
stack.pop_back();
} else {
++object_iterators->b_cur;
}
if (a_kv.first != b_kv.first ||
!compare_or_defer_values(a_kv.second, b_kv.second)) {
return false;
}
}
}
return true;
}
}
} | #include "tensorstore/internal/json/same.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
TEST(JsonSame, Basic) {
EXPECT_TRUE(tensorstore::internal_json::JsonSame(1.0, 1));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
::nlohmann::json::value_t::discarded, ::nlohmann::json::value_t::null));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::value_t::discarded,
::nlohmann::json::value_t::discarded));
EXPECT_TRUE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3},
{1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::array_t{}, ::nlohmann::json::array_t{}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
::nlohmann::json::object_t{}, ::nlohmann::json::object_t{}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 4}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{1, ::nlohmann::json::value_t::discarded, 3},
{1, ::nlohmann::json::value_t::discarded, 3}));
EXPECT_TRUE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 4}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"c", 3}}));
EXPECT_FALSE(tensorstore::internal_json::JsonSame(
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
{{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}, {"d", 4}}));
const auto make_nested = [](int depth) {
::nlohmann::json value;
::nlohmann::json* tail = &value;
for (int i = 0; i < depth; ++i) {
*tail = ::nlohmann::json::object_t();
auto& obj = tail->get_ref<::nlohmann::json::object_t&>();
tail = &obj["a"];
}
return value;
};
auto nested = make_nested(10000);
EXPECT_TRUE(tensorstore::internal_json::JsonSame(nested, nested));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/same.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/same_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
03851c60-929c-4402-a61b-1dc7a8631e00 | cpp | google/tensorstore | pprint_python | tensorstore/internal/json/pprint_python.cc | tensorstore/internal/json/pprint_python_test.cc | #include "tensorstore/internal/json/pprint_python.h"
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/strings/escaping.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_python {
namespace {
void FormatStringForPython(std::string* out, std::string_view s) {
*out += '\'';
*out += absl::CHexEscape(s);
*out += '\'';
}
void FormatAsSingleLineForPython(std::string* out, const ::nlohmann::json& j) {
switch (j.type()) {
case ::nlohmann::json::value_t::object: {
*out += "{";
bool first = true;
for (const auto& [key, value] :
j.get_ref<const ::nlohmann::json::object_t&>()) {
if (!first) {
*out += ", ";
} else {
first = false;
}
FormatStringForPython(out, key);
*out += ": ";
FormatAsSingleLineForPython(out, value);
}
*out += "}";
break;
}
case ::nlohmann::json::value_t::array: {
*out += '[';
bool first = true;
for (const auto& x : j.get_ref<const ::nlohmann::json::array_t&>()) {
if (!first) {
*out += ", ";
} else {
first = false;
}
FormatAsSingleLineForPython(out, x);
}
*out += ']';
break;
}
case ::nlohmann::json::value_t::string: {
FormatStringForPython(out, j.get_ref<const std::string&>());
break;
}
case ::nlohmann::json::value_t::binary: {
auto& s = j.get_ref<const ::nlohmann::json::binary_t&>();
*out += 'b';
FormatStringForPython(
out,
std::string_view(reinterpret_cast<const char*>(s.data()), s.size()));
break;
}
case ::nlohmann::json::value_t::boolean: {
*out += (j.get_ref<const bool&>() ? "True" : "False");
break;
}
case ::nlohmann::json::value_t::null: {
*out += "None";
break;
}
default:
*out += j.dump();
break;
}
}
void PrettyPrintJsonObjectAsPythonInternal(
std::string* out, const ::nlohmann::json::object_t& obj,
PrettyPrintJsonAsPythonOptions options) {
*out += '{';
for (const auto& [key, value] : obj) {
*out += '\n';
auto new_options = options;
new_options.subsequent_indent += options.indent;
new_options.cur_line_indent = new_options.subsequent_indent;
new_options.width -= 1;
out->append(new_options.subsequent_indent, ' ');
size_t prev_size = out->size();
FormatStringForPython(out, key);
size_t key_repr_len = out->size() - prev_size;
*out += ": ";
new_options.cur_line_indent += key_repr_len + 2;
PrettyPrintJsonAsPython(out, value, new_options);
*out += ',';
}
if (!obj.empty()) {
*out += '\n';
out->append(options.subsequent_indent, ' ');
}
*out += '}';
}
void PrettyPrintJsonArrayAsPythonInternal(
std::string* out, const ::nlohmann::json::array_t& arr,
PrettyPrintJsonAsPythonOptions options) {
*out += '[';
auto new_options = options;
new_options.subsequent_indent += options.indent;
new_options.cur_line_indent = new_options.subsequent_indent;
new_options.width -= 1;
for (const auto& value : arr) {
*out += '\n';
out->append(new_options.subsequent_indent, ' ');
PrettyPrintJsonAsPython(out, value, new_options);
*out += ',';
}
if (!arr.empty()) {
*out += '\n';
out->append(options.subsequent_indent, ' ');
}
*out += ']';
}
}
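// Appends the Python representation of `j` to `*out`: first formats it on a
// single line, then falls back to the indented multi-line form for objects
// and arrays that would exceed the configured width.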
void PrettyPrintJsonAsPython(std::string* out, const ::nlohmann::json& j,
const PrettyPrintJsonAsPythonOptions& options) {
size_t existing_size = out->size();
FormatAsSingleLineForPython(out, j);
std::ptrdiff_t added_size = out->size() - existing_size;
int max_width = options.width - options.cur_line_indent;
if (added_size > max_width) {
if (const auto* obj = j.get_ptr<const ::nlohmann::json::object_t*>()) {
out->resize(existing_size);
PrettyPrintJsonObjectAsPythonInternal(out, *obj, options);
return;
} else if (const auto* arr =
j.get_ptr<const ::nlohmann::json::array_t*>()) {
out->resize(existing_size);
PrettyPrintJsonArrayAsPythonInternal(out, *arr, options);
return;
}
}
}
std::string PrettyPrintJsonAsPython(
const ::nlohmann::json& j, const PrettyPrintJsonAsPythonOptions& options) {
std::string out;
PrettyPrintJsonAsPython(&out, j, options);
return out;
}
std::string PrettyPrintJsonAsPythonRepr(
const Result<::nlohmann::json>& j, std::string_view prefix,
std::string_view suffix, const PrettyPrintJsonAsPythonOptions& options) {
std::string pretty{prefix};
const char* dotdotdot = "...";
if (j.ok()) {
PrettyPrintJsonAsPythonOptions adjusted_options = options;
adjusted_options.width -= suffix.size();
adjusted_options.cur_line_indent += prefix.size();
    PrettyPrintJsonAsPython(&pretty, *j, adjusted_options);
dotdotdot = "";
}
tensorstore::StrAppend(&pretty, dotdotdot, suffix);
return pretty;
}
}
} | #include "tensorstore/internal/json/pprint_python.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::internal_python::PrettyPrintJsonAsPython;
using ::tensorstore::internal_python::PrettyPrintJsonAsPythonRepr;
TEST(PrettyPrintJsonAsPythonTest, Basic) {
EXPECT_EQ("None", PrettyPrintJsonAsPython(::nlohmann::json(nullptr)));
EXPECT_EQ("True", PrettyPrintJsonAsPython(::nlohmann::json(true)));
EXPECT_EQ("False", PrettyPrintJsonAsPython(::nlohmann::json(false)));
EXPECT_EQ("'abc'", PrettyPrintJsonAsPython(::nlohmann::json("abc")));
EXPECT_EQ("b'abc'",
PrettyPrintJsonAsPython(::nlohmann::json(::nlohmann::json::binary_t(
std::vector<uint8_t>{'a', 'b', 'c'}))));
EXPECT_EQ("1", PrettyPrintJsonAsPython(::nlohmann::json(1)));
EXPECT_EQ("1.5", PrettyPrintJsonAsPython(::nlohmann::json(1.5)));
EXPECT_EQ("[1, 2, 3]", PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3})));
EXPECT_EQ("[1, 2, 3]",
PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3}),
                                    {/*indent=*/2, /*width=*/9}));
EXPECT_EQ(R"([
1,
2,
3,
])",
PrettyPrintJsonAsPython(::nlohmann::json({1, 2, 3}),
                                    {/*indent=*/2, /*width=*/5}));
EXPECT_EQ("{'a': 1, 'b': 2, 'c': 3}",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}})));
EXPECT_EQ(
"{'a': 1, 'b': 2, 'c': 3}",
PrettyPrintJsonAsPython(::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}}),
                              {/*indent=*/2, /*width=*/24}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': 3,
})",
PrettyPrintJsonAsPython(::nlohmann::json({{"a", 1}, {"b", 2}, {"c", 3}}),
                              {/*indent=*/2, /*width=*/10}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': [
1,
2,
3,
4,
],
})",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}),
          {/*indent=*/2, /*width=*/10}));
EXPECT_EQ(
R"({
'a': 1,
'b': 2,
'c': [1, 2, 3, 4],
})",
PrettyPrintJsonAsPython(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}),
          {/*indent=*/2, /*width=*/21}));
}
TEST(PrettyPrintJsonAsPythonReprTest, Basic) {
EXPECT_EQ("Foo(None)", PrettyPrintJsonAsPythonRepr(::nlohmann::json(nullptr),
"Foo(", ")"));
EXPECT_EQ("Foo(...)",
PrettyPrintJsonAsPythonRepr(absl::UnknownError(""), "Foo(", ")"));
EXPECT_EQ(
R"(Foo({
'a': 1,
'b': 2,
'c': [1, 2, 3, 4],
}))",
PrettyPrintJsonAsPythonRepr(
::nlohmann::json({{"a", 1}, {"b", 2}, {"c", {1, 2, 3, 4}}}), "Foo(",
")", {2, 21}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/pprint_python.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/pprint_python_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c5455352-1ad0-4ff2-9c83-e89ac052dc9c | cpp | google/tensorstore | value_as | tensorstore/internal/json/value_as.cc | tensorstore/internal/json/value_as_test.cc | #include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <cmath>
#include <cstddef>
#include <limits>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json {
absl::Status ExpectedError(const ::nlohmann::json& j,
std::string_view type_name) {
if (j.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected ", type_name, ", but member is missing"));
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", type_name, ", but received: ", j.dump()));
}
absl::Status ValidationError(const ::nlohmann::json& j,
std::string_view type_name) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Validation of ", type_name, " failed, received: ", j.dump()));
}
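// Converts `json` to an integer of type T, failing unless the value lies in
// [min_value, max_value]; the error message names either the integer type or
// the permitted range.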
template <typename T>
absl::Status JsonRequireIntegerImpl<T>::Execute(const ::nlohmann::json& json,
T* result, bool strict,
T min_value, T max_value) {
if (auto x = JsonValueAs<T>(json, strict)) {
if (*x >= min_value && *x <= max_value) {
*result = *x;
return absl::OkStatus();
}
}
constexpr const char* kTypeName = []() {
if constexpr (sizeof(T) == 4 && std::is_signed_v<T>)
return "32-bit signed integer";
if constexpr (sizeof(T) == 4 && std::is_unsigned_v<T>)
return "32-bit unsigned integer";
if constexpr (sizeof(T) == 8 && std::is_signed_v<T>)
return "64-bit signed integer";
if constexpr (sizeof(T) == 8 && std::is_unsigned_v<T>)
return "64-bit unsigned integer";
return GetTypeName(internal::type_identity_t<T>{});
}();
if constexpr (kTypeName != nullptr) {
if (min_value == std::numeric_limits<T>::min() &&
max_value == std::numeric_limits<T>::max()) {
return internal_json::ExpectedError(json, kTypeName);
}
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected integer in the range [", min_value, ", ",
max_value, "], but received: ", json.dump()));
}
template struct JsonRequireIntegerImpl<int64_t>;
template struct JsonRequireIntegerImpl<uint64_t>;
template <>
std::optional<std::nullptr_t> JsonValueAs<std::nullptr_t>(
const ::nlohmann::json& j, bool strict) {
if (j.is_null()) {
return nullptr;
}
return std::nullopt;
}
template <>
std::optional<bool> JsonValueAs<bool>(const ::nlohmann::json& j, bool strict) {
if (j.is_boolean()) {
return j.get<bool>();
}
if (!strict && j.is_string()) {
const auto& str = j.get_ref<std::string const&>();
if (str == "true") return true;
if (str == "false") return false;
}
return std::nullopt;
}
template <>
std::optional<int64_t> JsonValueAs<int64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
auto x = j.get<uint64_t>();
if (x <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
return static_cast<int64_t>(x);
}
} else if (j.is_number_integer()) {
return j.get<int64_t>();
} else if (j.is_number_float()) {
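    // -2^63 and 2^63 are powers of two and thus exactly representable as
    // doubles, so the range check below is exact; the upper bound is
    // exclusive because 2^63 itself overflows int64_t.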
auto x = j.get<double>();
    if (x >= -9223372036854775808.0 /*=-2^63*/ &&
        x < 9223372036854775808.0 /*=2^63*/ && x == std::floor(x)) {
return static_cast<int64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
int64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
template <>
std::optional<uint64_t> JsonValueAs<uint64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
return j.get<uint64_t>();
} else if (j.is_number_integer()) {
int64_t x = j.get<int64_t>();
if (x >= 0) {
return static_cast<uint64_t>(x);
}
} else if (j.is_number_float()) {
double x = j.get<double>();
    if (x >= 0.0 && x < 18446744073709551616.0 /*=2^64*/ &&
        x == std::floor(x)) {
return static_cast<uint64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
uint64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
template <>
std::optional<double> JsonValueAs<double>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number()) {
return j.get<double>();
}
if (!strict && j.is_string()) {
double result = 0;
if (absl::SimpleAtod(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
return std::nullopt;
}
template <>
std::optional<std::string> JsonValueAs<std::string>(const ::nlohmann::json& j,
bool strict) {
if (j.is_string()) {
return j.get<std::string>();
}
return std::nullopt;
}
}
} | #include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json::JsonRequireInteger;
using ::tensorstore::internal_json::JsonRequireValueAs;
using ::tensorstore::internal_json::JsonValueAs;
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json::object_t& j,
const char* member) {
auto it = j.find(member);
if (it == j.end()) {
return std::nullopt;
}
return JsonValueAs<T>(it->second, kStrict);
}
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json& j, const char* member) {
if (const auto* obj = j.get_ptr<const ::nlohmann::json::object_t*>()) {
return JsonMemberT<T, kStrict>(*obj, member);
}
return std::nullopt;
}
TEST(JsonTest, Meta) {
auto JsonRequireString = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<std::string>(json, member);
return v.has_value() && !v->empty();
};
auto JsonRequireInt = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<int64_t, false>(json, member);
return v.has_value();
};
auto meta = ::nlohmann::json::meta();
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(JsonRequireString(meta, "name"));
EXPECT_TRUE(JsonRequireString(meta, "url"));
EXPECT_TRUE(JsonRequireString(meta, "platform"));
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(meta.find("compiler") != meta.end());
auto compiler = meta["compiler"];
EXPECT_TRUE(JsonRequireString(compiler, "c++"));
EXPECT_FALSE(JsonRequireString(meta, "version"));
auto version = meta["version"];
EXPECT_TRUE(JsonRequireInt(version, "major"));
}
::nlohmann::json GetDefaultJSON() {
return ::nlohmann::json{
{"bool_true", true}, {"bool_false", false}, {"str_bool", "true"},
{"signed", 456}, {"neg_signed", -567}, {"unsigned", 565u},
{"float", 456.789}, {"neg_float", -678.91}, {"int_float", 122.0},
{"str", "abc"}, {"str_number", "789"}, {"str_float", "123.40"},
{"nil", nullptr}, {"empty_obj", {}}, {"obj", {"a", 1}},
};
}
std::set<std::string> GetKeys() {
return std::set<std::string>{{
"bool_true",
"bool_false",
"str_bool",
"signed",
"neg_signed",
"unsigned",
"float",
"neg_float",
"int_float",
"str",
"abc",
"str_number",
"str_float",
"nil",
"empty_obj",
"obj",
"missing",
}};
}
TEST(JsonTest, JsonParseBool) {
auto keys = GetKeys();
auto JsonParseBool = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<bool, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseBool(result, "bool_true"));
EXPECT_EQ(true, *JsonParseBool(result, "bool_true"));
ASSERT_TRUE(JsonParseBool(result, "bool_false"));
EXPECT_EQ(false, *JsonParseBool(result, "bool_false"));
ASSERT_TRUE(JsonParseBool(result, "str_bool"));
EXPECT_EQ(true, *JsonParseBool(result, "str_bool"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseBool(result, x.c_str())) << x;
}
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("a")));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json("false")));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json("true")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("true"), kStrict));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json(true), kStrict));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json(false), kStrict));
}
TEST(JsonValueAsTest, Int64FromUint64) {
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0x8fffffffffffffffu)));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0xffffffffffffffffu)));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu)));
const bool kStrict = true;
EXPECT_EQ(
0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu), kStrict));
}
TEST(JsonValueAsTest, Int64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<int64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<int64_t>(::nlohmann::json(1.0)));
EXPECT_EQ(
std::nullopt,
      JsonValueAs<int64_t>(::nlohmann::json(9223372036854775808.0 /*=2^63*/)));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(-9223372036854777856.0)));
EXPECT_EQ(9223372036854774784,
JsonValueAs<int64_t>(::nlohmann::json(9223372036854774784.0)));
EXPECT_EQ(
-0x8000000000000000,
      JsonValueAs<int64_t>(
          ::nlohmann::json(-9223372036854775808.0 /*=-2^63*/)));
}
TEST(JsonValueAsTest, Int64FromString) {
EXPECT_EQ(-1, JsonValueAs<int64_t>(::nlohmann::json("-1")));
EXPECT_EQ(-0x8000000000000000,
JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775808")));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json("9223372036854775807")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("9223372036854775808")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775809")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json("-1"), kStrict));
}
TEST(JsonValueAsTest, Uint64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<uint64_t>(::nlohmann::json(1.0)));
  EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(
                              18446744073709551616.0 /*=2^64*/)));
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(-1.0)));
EXPECT_EQ(18446744073709549568u,
JsonValueAs<uint64_t>(::nlohmann::json(18446744073709549568.0)));
}
TEST(JsonValueAsTest, Uint64FromString) {
EXPECT_EQ(0xffffffffffffffffu,
JsonValueAs<uint64_t>(::nlohmann::json("18446744073709551615")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("-1")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<uint64_t>(::nlohmann::json("1"), kStrict));
}
TEST(JsonTest, JsonParseInt) {
auto keys = GetKeys();
auto JsonParseInt = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<int64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseInt(result, "signed"));
EXPECT_EQ(456, *JsonParseInt(result, "signed"));
ASSERT_TRUE(JsonParseInt(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseInt(result, "neg_signed"));
ASSERT_TRUE(JsonParseInt(result, "unsigned"));
EXPECT_EQ(565, *JsonParseInt(result, "unsigned"));
ASSERT_TRUE(JsonParseInt(result, "int_float"));
EXPECT_EQ(122, *JsonParseInt(result, "int_float"));
ASSERT_TRUE(JsonParseInt(result, "str_number"));
EXPECT_EQ(789, *JsonParseInt(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseInt(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseUnsigned) {
auto keys = GetKeys();
auto JsonParseUnsigned = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<uint64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseUnsigned(result, "signed"));
EXPECT_EQ(456, *JsonParseUnsigned(result, "signed"));
ASSERT_TRUE(JsonParseUnsigned(result, "unsigned"));
EXPECT_EQ(565, *JsonParseUnsigned(result, "unsigned"));
ASSERT_TRUE(JsonParseUnsigned(result, "int_float"));
EXPECT_EQ(122, *JsonParseUnsigned(result, "int_float"));
ASSERT_TRUE(JsonParseUnsigned(result, "str_number"));
EXPECT_EQ(789, *JsonParseUnsigned(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseUnsigned(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseDouble) {
auto keys = GetKeys();
auto JsonParseDouble = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<double, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseDouble(result, "signed"));
EXPECT_EQ(456, *JsonParseDouble(result, "signed"));
ASSERT_TRUE(JsonParseDouble(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseDouble(result, "neg_signed"));
ASSERT_TRUE(JsonParseDouble(result, "unsigned"));
EXPECT_EQ(565, *JsonParseDouble(result, "unsigned"));
ASSERT_TRUE(JsonParseDouble(result, "float"));
EXPECT_EQ(456.789, *JsonParseDouble(result, "float"));
ASSERT_TRUE(JsonParseDouble(result, "neg_float"));
EXPECT_EQ(-678.91, *JsonParseDouble(result, "neg_float"));
ASSERT_TRUE(JsonParseDouble(result, "int_float"));
EXPECT_EQ(122, *JsonParseDouble(result, "int_float"));
ASSERT_TRUE(JsonParseDouble(result, "str_number"));
EXPECT_EQ(789, *JsonParseDouble(result, "str_number"));
ASSERT_TRUE(JsonParseDouble(result, "str_float"));
EXPECT_EQ(123.4, *JsonParseDouble(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseDouble(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseString) {
auto keys = GetKeys();
auto JsonParseString = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<std::string>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseString(result, "str_bool"));
EXPECT_EQ("true", *JsonParseString(result, "str_bool"));
ASSERT_TRUE(JsonParseString(result, "str"));
EXPECT_EQ("abc", *JsonParseString(result, "str"));
ASSERT_TRUE(JsonParseString(result, "str_number"));
EXPECT_EQ("789", *JsonParseString(result, "str_number"));
ASSERT_TRUE(JsonParseString(result, "str_float"));
EXPECT_EQ("123.40", *JsonParseString(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseString(result, x.c_str())) << x;
}
}
TEST(JsonRequireValueAs, Success) {
{
bool v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(true), &v, true).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, false).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, [](bool) {
return true;
}).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(
JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr, true).ok());
}
{
int64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-3), &v, true).ok());
EXPECT_EQ(-3, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-4.0), &v, false).ok());
EXPECT_EQ(-4, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, false).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, [](int64_t) {
return true;
}).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(
JsonRequireValueAs<int64_t>(::nlohmann::json(-3), nullptr, true).ok());
}
{
uint64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(6), &v, true).ok());
EXPECT_EQ(6, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(7.0), &v, false).ok());
EXPECT_EQ(7, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, false).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, [](uint64_t) {
return true;
}).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3), nullptr, true).ok());
}
{
double v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(0.5), &v, true).ok());
EXPECT_EQ(0.5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, false).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, [](double) {
return true;
}).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(
JsonRequireValueAs<double>(::nlohmann::json(3.0), nullptr, true).ok());
}
{
std::string v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("x"), &v, false).ok());
EXPECT_EQ("x", v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("y"), &v, [](std::string) {
return true;
}).ok());
EXPECT_EQ("y", v);
EXPECT_TRUE(
JsonRequireValueAs<std::string>(::nlohmann::json("z"), nullptr, true)
.ok());
}
}
TEST(JsonRequireValueAs, Failure) {
{
bool v;
EXPECT_THAT(JsonRequireValueAs(::nlohmann::json("true"), &v, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
}
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr,
[](bool) { return false; }),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Validation of boolean failed, received: true"));
EXPECT_THAT(
JsonRequireValueAs<int64_t>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit signed integer, but received: \"true\""));
EXPECT_THAT(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3.5), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit unsigned integer, but received: 3.5"));
EXPECT_THAT(
JsonRequireValueAs<std::string>(::nlohmann::json(true), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: true"));
}
TEST(JsonRequireIntegerTest, Success) {
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-5), &result_int32,
                                    /*strict=*/true, /*min_value=*/-7,
                                    /*max_value=*/-3));
EXPECT_EQ(-5, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-7), &result_int32,
                                    /*strict=*/true, /*min_value=*/-7,
                                    /*max_value=*/-3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json("-7"), &result_int32,
                                    /*strict=*/false, /*min_value=*/-7,
                                    /*max_value=*/-3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-3), &result_int32,
                                    /*strict=*/true, /*min_value=*/-7,
                                    /*max_value=*/-3));
EXPECT_EQ(-3, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_uint32,
                                 /*strict=*/true, /*min_value=*/2,
                                 /*max_value=*/7));
EXPECT_EQ(5u, result_uint32);
}
{
std::int16_t result_int16 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_int16,
                                 /*strict=*/true, /*min_value=*/2,
                                 /*max_value=*/7));
EXPECT_EQ(5, result_int16);
}
}
TEST(JsonRequireIntegerTest, Failure) {
{
std::int32_t result_int32 = 42;
EXPECT_THAT(
        JsonRequireInteger(::nlohmann::json(-2), &result_int32,
                           /*strict=*/true, /*min_value=*/-7,
                           /*max_value=*/-3),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but received: -2"));
EXPECT_EQ(42, result_int32);
}
{
std::int32_t result_int32 = 42;
    EXPECT_THAT(JsonRequireInteger(::nlohmann::json(true), &result_int32,
                                   /*strict=*/true, /*min_value=*/-7,
                                   /*max_value=*/-3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but "
"received: true"));
EXPECT_EQ(42, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_THAT(
        JsonRequireInteger(::nlohmann::json(11), &result_uint32,
                           /*strict=*/true, /*min_value=*/5, /*max_value=*/10),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[5, 10\\], but received: 11"));
EXPECT_EQ(42u, result_uint32);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/value_as.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/value_as_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |